xref: /freebsd/sys/kern/vfs_bio.c (revision 39ee7a7a6bdd1557b1c3532abf60d139798ac88b)
1 /*-
2  * Copyright (c) 2004 Poul-Henning Kamp
3  * Copyright (c) 1994,1997 John S. Dyson
4  * Copyright (c) 2013 The FreeBSD Foundation
5  * All rights reserved.
6  *
7  * Portions of this software were developed by Konstantin Belousov
8  * under sponsorship from the FreeBSD Foundation.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 /*
33  * This file contains a buffer I/O scheme implementing a coherent
34  * VM object and buffer cache.  Pains have been taken to make sure
35  * that the performance degradation associated with such schemes is
36  * not realized.
37  *
38  * Author:  John S. Dyson
39  * Significant help during the development and debugging phases
40  * was provided by David Greenman, also of the FreeBSD core team.
41  *
42  * see man buf(9) for more info.
43  */
44 
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/bio.h>
51 #include <sys/conf.h>
52 #include <sys/buf.h>
53 #include <sys/devicestat.h>
54 #include <sys/eventhandler.h>
55 #include <sys/fail.h>
56 #include <sys/limits.h>
57 #include <sys/lock.h>
58 #include <sys/malloc.h>
59 #include <sys/mount.h>
60 #include <sys/mutex.h>
61 #include <sys/kernel.h>
62 #include <sys/kthread.h>
63 #include <sys/proc.h>
64 #include <sys/resourcevar.h>
65 #include <sys/rwlock.h>
66 #include <sys/smp.h>
67 #include <sys/sysctl.h>
68 #include <sys/sysproto.h>
69 #include <sys/vmem.h>
70 #include <sys/vmmeter.h>
71 #include <sys/vnode.h>
72 #include <sys/watchdog.h>
73 #include <geom/geom.h>
74 #include <vm/vm.h>
75 #include <vm/vm_param.h>
76 #include <vm/vm_kern.h>
77 #include <vm/vm_pageout.h>
78 #include <vm/vm_page.h>
79 #include <vm/vm_object.h>
80 #include <vm/vm_extern.h>
81 #include <vm/vm_map.h>
82 #include <vm/swap_pager.h>
83 #include "opt_compat.h"
84 #include "opt_swap.h"
85 
86 static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
87 
88 struct	bio_ops bioops;		/* I/O operation notification */
89 
90 struct	buf_ops buf_ops_bio = {
91 	.bop_name	=	"buf_ops_bio",
92 	.bop_write	=	bufwrite,
93 	.bop_strategy	=	bufstrategy,
94 	.bop_sync	=	bufsync,
95 	.bop_bdflush	=	bufbdflush,
96 };
97 
98 static struct buf *buf;		/* buffer header pool */
99 extern struct buf *swbuf;	/* Swap buffer header pool. */
100 caddr_t unmapped_buf;
101 
102 /* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
103 struct proc *bufdaemonproc;
104 struct proc *bufspacedaemonproc;
105 
106 static int inmem(struct vnode *vp, daddr_t blkno);
107 static void vm_hold_free_pages(struct buf *bp, int newbsize);
108 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
109 		vm_offset_t to);
110 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
111 static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
112 		vm_page_t m);
113 static void vfs_clean_pages_dirty_buf(struct buf *bp);
114 static void vfs_setdirty_locked_object(struct buf *bp);
115 static void vfs_vmio_invalidate(struct buf *bp);
116 static void vfs_vmio_truncate(struct buf *bp, int npages);
117 static void vfs_vmio_extend(struct buf *bp, int npages, int size);
118 static int vfs_bio_clcheck(struct vnode *vp, int size,
119 		daddr_t lblkno, daddr_t blkno);
120 static int buf_flush(struct vnode *vp, int);
121 static int buf_recycle(bool);
122 static int buf_scan(bool);
123 static int flushbufqueues(struct vnode *, int, int);
124 static void buf_daemon(void);
125 static void bremfreel(struct buf *bp);
126 static __inline void bd_wakeup(void);
127 static int sysctl_runningspace(SYSCTL_HANDLER_ARGS);
128 static void bufkva_reclaim(vmem_t *, int);
129 static void bufkva_free(struct buf *);
130 static int buf_import(void *, void **, int, int);
131 static void buf_release(void *, void **, int);
132 
133 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
134     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
135 static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
136 #endif
137 
138 int vmiodirenable = TRUE;
139 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
140     "Use the VM system for directory writes");
141 long runningbufspace;
142 SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
143     "Amount of presently outstanding async buffer I/O");
144 static long bufspace;
145 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
146     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
147 SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
148     &bufspace, 0, sysctl_bufspace, "L", "Virtual memory used for buffers");
149 #else
150 SYSCTL_LONG(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
151     "Physical memory used for buffers");
152 #endif
153 static long bufkvaspace;
154 SYSCTL_LONG(_vfs, OID_AUTO, bufkvaspace, CTLFLAG_RD, &bufkvaspace, 0,
155     "Kernel virtual memory used for buffers");
156 static long maxbufspace;
157 SYSCTL_LONG(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW, &maxbufspace, 0,
158     "Maximum allowed value of bufspace (including metadata)");
159 static long bufmallocspace;
160 SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
161     "Amount of malloced memory for buffers");
162 static long maxbufmallocspace;
163 SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace,
164     0, "Maximum amount of malloced memory for buffers");
165 static long lobufspace;
166 SYSCTL_LONG(_vfs, OID_AUTO, lobufspace, CTLFLAG_RW, &lobufspace, 0,
167     "Minimum amount of buffer space we want to have");
168 long hibufspace;
169 SYSCTL_LONG(_vfs, OID_AUTO, hibufspace, CTLFLAG_RW, &hibufspace, 0,
170     "Maximum allowed value of bufspace (excluding metadata)");
171 long bufspacethresh;
172 SYSCTL_LONG(_vfs, OID_AUTO, bufspacethresh, CTLFLAG_RW, &bufspacethresh,
173     0, "Bufspace consumed before waking the daemon to free some");
174 static int buffreekvacnt;
175 SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
176     "Number of times we have freed the KVA space from some buffer");
177 static int bufdefragcnt;
178 SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
179     "Number of times we have had to repeat buffer allocation to defragment");
180 static long lorunningspace;
181 SYSCTL_PROC(_vfs, OID_AUTO, lorunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
182     CTLFLAG_RW, &lorunningspace, 0, sysctl_runningspace, "L",
183     "Minimum preferred space used for in-progress I/O");
184 static long hirunningspace;
185 SYSCTL_PROC(_vfs, OID_AUTO, hirunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
186     CTLFLAG_RW, &hirunningspace, 0, sysctl_runningspace, "L",
187     "Maximum amount of space to use for in-progress I/O");
188 int dirtybufferflushes;
189 SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
190     0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
191 int bdwriteskip;
192 SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
193     0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
194 int altbufferflushes;
195 SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW, &altbufferflushes,
196     0, "Number of fsync flushes to limit dirty buffers");
197 static int recursiveflushes;
198 SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW, &recursiveflushes,
199     0, "Number of flushes skipped due to being recursive");
200 static int numdirtybuffers;
201 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
202     "Number of buffers that are dirty (have unwritten changes) at the moment");
203 static int lodirtybuffers;
204 SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
205     "Number of dirty buffers below which the buf daemon can sleep");
206 static int hidirtybuffers;
207 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
208     "When the number of dirty buffers is considered severe");
209 int dirtybufthresh;
210 SYSCTL_INT(_vfs, OID_AUTO, dirtybufthresh, CTLFLAG_RW, &dirtybufthresh,
211     0, "Number of bdwrite to bawrite conversions to clear dirty buffers");
212 static int numfreebuffers;
213 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
214     "Number of free buffers");
215 static int lofreebuffers;
216 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
217    "Target number of free buffers");
218 static int hifreebuffers;
219 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
220    "Threshold for clean buffer recycling");
221 static int getnewbufcalls;
222 SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
223    "Number of calls to getnewbuf");
224 static int getnewbufrestarts;
225 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
226     "Number of times getnewbuf has had to restart a buffer acquisition");
227 static int mappingrestarts;
228 SYSCTL_INT(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RW, &mappingrestarts, 0,
229     "Number of times getblk has had to restart a buffer mapping for "
230     "unmapped buffer");
231 static int numbufallocfails;
232 SYSCTL_INT(_vfs, OID_AUTO, numbufallocfails, CTLFLAG_RW, &numbufallocfails, 0,
233     "Number of times buffer allocations failed");
234 static int flushbufqtarget = 100;
235 SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
236     "Amount of work to do in flushbufqueues when helping bufdaemon");
237 static long notbufdflushes;
238 SYSCTL_LONG(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, &notbufdflushes, 0,
239     "Number of dirty buffer flushes done by the bufdaemon helpers");
240 static long barrierwrites;
241 SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW, &barrierwrites, 0,
242     "Number of barrier writes");
243 SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
244     &unmapped_buf_allowed, 0,
245     "Permit the use of unmapped I/O");
246 
247 /*
248  * This lock synchronizes access to bd_request.
249  */
250 static struct mtx_padalign bdlock;
251 
252 /*
253  * This lock protects the runningbufreq and synchronizes runningbufwakeup and
254  * waitrunningbufspace().
255  */
256 static struct mtx_padalign rbreqlock;
257 
258 /*
259  * Lock that protects needsbuffer and the sleeps/wakeups surrounding it.
260  */
261 static struct rwlock_padalign nblock;
262 
263 /*
264  * Lock that protects bdirtywait.
265  */
266 static struct mtx_padalign bdirtylock;
267 
268 /*
269  * Wakeup point for bufdaemon, as well as indicator of whether it is already
270  * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
271  * is idling.
272  */
273 static int bd_request;
274 
275 /*
276  * Request/wakeup point for the bufspace daemon.
277  */
278 static int bufspace_request;
279 
280 /*
281  * Request for the buf daemon to write more buffers than is indicated by
282  * lodirtybuffers.  This may be necessary to push out excess dependencies or
283  * defragment the address space where a simple count of the number of dirty
284  * buffers is insufficient to characterize the demand for flushing them.
285  */
286 static int bd_speedupreq;
287 
288 /*
289  * bogus page -- for I/O to/from partially complete buffers
290  * this is a temporary solution to the problem, but it is not
291  * really that bad.  it would be better to split the buffer
292  * for input in the case of buffers partially already in memory,
293  * but the code is intricate enough already.
294  */
295 vm_page_t bogus_page;
296 
297 /*
298  * Synchronization (sleep/wakeup) variable for active buffer space requests.
299  * Set when wait starts, cleared prior to wakeup().
300  * Used in runningbufwakeup() and waitrunningbufspace().
301  */
302 static int runningbufreq;
303 
304 /*
305  * Synchronization (sleep/wakeup) variable for buffer requests.
306  * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
307  * by and/or.
308  * Used in numdirtywakeup(), bufspace_wakeup(), bwillwrite(),
309  * getnewbuf(), and getblk().
310  */
311 static volatile int needsbuffer;
312 
313 /*
314  * Synchronization for bwillwrite() waiters.
315  */
316 static int bdirtywait;
317 
318 /*
319  * Definitions for the buffer free lists.
320  */
321 #define QUEUE_NONE	0	/* on no queue */
322 #define QUEUE_EMPTY	1	/* empty buffer headers */
323 #define QUEUE_DIRTY	2	/* B_DELWRI buffers */
324 #define QUEUE_CLEAN	3	/* non-B_DELWRI buffers */
325 #define QUEUE_SENTINEL	1024	/* not a queue index, but a sentinel marker */
326 
327 /* Maximum number of clean buffer queues. */
328 #define	CLEAN_QUEUES	16
329 
330 /* Configured number of clean queues. */
331 static int clean_queues;
332 
333 /* Maximum number of buffer queues. */
334 #define BUFFER_QUEUES	(QUEUE_CLEAN + CLEAN_QUEUES)
335 
336 /* Queues for free buffers with various properties */
337 static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
338 #ifdef INVARIANTS
339 static int bq_len[BUFFER_QUEUES];
340 #endif
341 
342 /*
343  * Lock for each bufqueue
344  */
345 static struct mtx_padalign bqlocks[BUFFER_QUEUES];
346 
347 /*
348  * per-cpu empty buffer cache.
349  */
350 uma_zone_t buf_zone;
351 
352 /*
353  * Single global constant for BUF_WMESG, to avoid getting multiple references.
354  * buf_wmesg is referenced from macros.
355  */
356 const char *buf_wmesg = BUF_WMESG;
357 
358 static int
359 sysctl_runningspace(SYSCTL_HANDLER_ARGS)
360 {
361 	long value;
362 	int error;
363 
364 	value = *(long *)arg1;
365 	error = sysctl_handle_long(oidp, &value, 0, req);
366 	if (error != 0 || req->newptr == NULL)
367 		return (error);
368 	mtx_lock(&rbreqlock);
369 	if (arg1 == &hirunningspace) {
370 		if (value < lorunningspace)
371 			error = EINVAL;
372 		else
373 			hirunningspace = value;
374 	} else {
375 		KASSERT(arg1 == &lorunningspace,
376 		    ("%s: unknown arg1", __func__));
377 		if (value > hirunningspace)
378 			error = EINVAL;
379 		else
380 			lorunningspace = value;
381 	}
382 	mtx_unlock(&rbreqlock);
383 	return (error);
384 }
385 
386 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
387     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
388 static int
389 sysctl_bufspace(SYSCTL_HANDLER_ARGS)
390 {
391 	long lvalue;
392 	int ivalue;
393 
394 	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
395 		return (sysctl_handle_long(oidp, arg1, arg2, req));
396 	lvalue = *(long *)arg1;
397 	if (lvalue > INT_MAX)
398 		/* On overflow, still write out a long to trigger ENOMEM. */
399 		return (sysctl_handle_long(oidp, &lvalue, 0, req));
400 	ivalue = lvalue;
401 	return (sysctl_handle_int(oidp, &ivalue, 0, req));
402 }
403 #endif
404 
405 static int
406 bqcleanq(void)
407 {
408 	static int nextq;
409 
410 	return ((atomic_fetchadd_int(&nextq, 1) % clean_queues) + QUEUE_CLEAN);
411 }
412 
413 static int
414 bqisclean(int qindex)
415 {
416 
417 	return (qindex >= QUEUE_CLEAN && qindex < QUEUE_CLEAN + CLEAN_QUEUES);
418 }
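
/*
 * Illustrative note (not part of the original source): bqcleanq() hands out
 * clean-queue indices round-robin.  For example, with clean_queues == 4,
 * successive calls return QUEUE_CLEAN+0, +1, +2, +3, +0, ... because
 * atomic_fetchadd_int() returns the pre-increment value of nextq.
 */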
419 
420 /*
421  *	bqlock:
422  *
423  *	Return the appropriate queue lock based on the index.
424  */
425 static inline struct mtx *
426 bqlock(int qindex)
427 {
428 
429 	return (struct mtx *)&bqlocks[qindex];
430 }
431 
432 /*
433  *	bdirtywakeup:
434  *
435  *	Wakeup any bwillwrite() waiters.
436  */
437 static void
438 bdirtywakeup(void)
439 {
440 	mtx_lock(&bdirtylock);
441 	if (bdirtywait) {
442 		bdirtywait = 0;
443 		wakeup(&bdirtywait);
444 	}
445 	mtx_unlock(&bdirtylock);
446 }
447 
448 /*
449  *	bdirtysub:
450  *
451  *	Decrement the numdirtybuffers count by one and wakeup any
452  *	threads blocked in bwillwrite().
453  */
454 static void
455 bdirtysub(void)
456 {
457 
458 	if (atomic_fetchadd_int(&numdirtybuffers, -1) ==
459 	    (lodirtybuffers + hidirtybuffers) / 2)
460 		bdirtywakeup();
461 }
462 
463 /*
464  *	bdirtyadd:
465  *
466  *	Increment the numdirtybuffers count by one and wakeup the buf
467  *	daemon if needed.
468  */
469 static void
470 bdirtyadd(void)
471 {
472 
473 	/*
474 	 * Only do the wakeup once as we cross the boundary.  The
475 	 * buf daemon will keep running until the condition clears.
476 	 */
477 	if (atomic_fetchadd_int(&numdirtybuffers, 1) ==
478 	    (lodirtybuffers + hidirtybuffers) / 2)
479 		bd_wakeup();
480 }
481 
482 /*
483  *	bufspace_wakeup:
484  *
485  *	Called when buffer space is potentially available for recovery.
486  *	getnewbuf() will block on this flag when it is unable to free
487  *	sufficient buffer space.  Buffer space becomes recoverable when
488  *	bp's get placed back in the queues.
489  */
490 static void
491 bufspace_wakeup(void)
492 {
493 
494 	/*
495 	 * If someone is waiting for bufspace, wake them up.
496 	 *
497 	 * Since needsbuffer is set prior to doing an additional queue
498 	 * scan it is safe to check for the flag prior to acquiring the
499 	 * lock.  The thread that is preparing to scan again before
500 	 * blocking would discover the buf we released.
501 	 */
502 	if (needsbuffer) {
503 		rw_rlock(&nblock);
504 		if (atomic_cmpset_int(&needsbuffer, 1, 0) == 1)
505 			wakeup(__DEVOLATILE(void *, &needsbuffer));
506 		rw_runlock(&nblock);
507 	}
508 }
509 
510 /*
511  *	bufspace_daemonwakeup:
512  *
513  *	Wakeup the daemon responsible for freeing clean bufs.
514  */
515 static void
516 bufspace_daemonwakeup(void)
517 {
518 	rw_rlock(&nblock);
519 	if (bufspace_request == 0) {
520 		bufspace_request = 1;
521 		wakeup(&bufspace_request);
522 	}
523 	rw_runlock(&nblock);
524 }
525 
526 /*
527  *	bufspace_adjust:
528  *
529  *	Adjust the reported bufspace for a KVA managed buffer, possibly
530  * 	waking any waiters.
531  */
532 static void
533 bufspace_adjust(struct buf *bp, int bufsize)
534 {
535 	long space;
536 	int diff;
537 
538 	KASSERT((bp->b_flags & B_MALLOC) == 0,
539 	    ("bufspace_adjust: malloc buf %p", bp));
540 	diff = bufsize - bp->b_bufsize;
541 	if (diff < 0) {
542 		atomic_subtract_long(&bufspace, -diff);
543 		bufspace_wakeup();
544 	} else {
545 		space = atomic_fetchadd_long(&bufspace, diff);
546 		/* Wake up the daemon on the transition. */
547 		if (space < bufspacethresh && space + diff >= bufspacethresh)
548 			bufspace_daemonwakeup();
549 	}
550 	bp->b_bufsize = bufsize;
551 }
552 
553 /*
554  *	bufspace_reserve:
555  *
556  *	Reserve bufspace before calling allocbuf().  Metadata has a
557  *	different space limit than data.
558  */
559 static int
560 bufspace_reserve(int size, bool metadata)
561 {
562 	long limit;
563 	long space;
564 
565 	if (metadata)
566 		limit = maxbufspace;
567 	else
568 		limit = hibufspace;
569 	do {
570 		space = bufspace;
571 		if (space + size > limit)
572 			return (ENOSPC);
573 	} while (atomic_cmpset_long(&bufspace, space, space + size) == 0);
574 
575 	/* Wake up the daemon on the transition. */
576 	if (space < bufspacethresh && space + size >= bufspacethresh)
577 		bufspace_daemonwakeup();
578 
579 	return (0);
580 }
581 
582 /*
583  *	bufspace_release:
584  *
585  *	Release reserved bufspace after bufspace_adjust() has consumed it.
586  */
587 static void
588 bufspace_release(int size)
589 {
590 	atomic_subtract_long(&bufspace, size);
591 	bufspace_wakeup();
592 }
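
/*
 * Illustrative caller pattern (a rough sketch only, not a verbatim excerpt
 * of getnewbuf()/getblk(); names and error handling are simplified):
 *
 *	if (bufspace_reserve(maxsize, metadata) != 0) {
 *		... recycle buffers or bufspace_wait(), then retry ...
 *	}
 *	bp = buf_alloc();
 *	allocbuf(bp, size);		-- bufspace_adjust() runs in here
 *	bufspace_release(maxsize);	-- drop the now-consumed reservation
 */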
593 
594 /*
595  *	bufspace_wait:
596  *
597  *	Wait for bufspace, acting as the buf daemon if a locked vnode is
598  *	supplied.  needsbuffer must be set in a safe fashion prior to
599  *	polling for space.  The operation must be re-tried on return.
600  */
601 static void
602 bufspace_wait(struct vnode *vp, int gbflags, int slpflag, int slptimeo)
603 {
604 	struct thread *td;
605 	int error, fl, norunbuf;
606 
607 	if ((gbflags & GB_NOWAIT_BD) != 0)
608 		return;
609 
610 	td = curthread;
611 	rw_wlock(&nblock);
612 	while (needsbuffer != 0) {
613 		if (vp != NULL && vp->v_type != VCHR &&
614 		    (td->td_pflags & TDP_BUFNEED) == 0) {
615 			rw_wunlock(&nblock);
616 			/*
617 			 * getblk() is called with a vnode locked, and
618 			 * a majority of the dirty buffers may well
619 			 * belong to that vnode.  Flushing those
620 			 * buffers makes progress that the buf_daemon,
621 			 * which cannot lock the vnode, is unable to
622 			 * achieve.
623 			 */
624 			norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
625 			    (td->td_pflags & TDP_NORUNNINGBUF);
626 
627 			/*
628 			 * Play bufdaemon.  The getnewbuf() function
629 			 * may be called while the thread owns lock
630 			 * for another dirty buffer for the same
631 			 * vnode, which makes it impossible to use
632 			 * VOP_FSYNC() there, due to the buffer lock
633 			 * recursion.
634 			 */
635 			td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
636 			fl = buf_flush(vp, flushbufqtarget);
637 			td->td_pflags &= norunbuf;
638 			rw_wlock(&nblock);
639 			if (fl != 0)
640 				continue;
641 			if (needsbuffer == 0)
642 				break;
643 		}
644 		error = rw_sleep(__DEVOLATILE(void *, &needsbuffer), &nblock,
645 		    (PRIBIO + 4) | slpflag, "newbuf", slptimeo);
646 		if (error != 0)
647 			break;
648 	}
649 	rw_wunlock(&nblock);
650 }
651 
652 
653 /*
654  *	bufspace_daemon:
655  *
656  *	buffer space management daemon.  Tries to maintain some marginal
657  *	amount of free buffer space so that requesting processes neither
658  *	block nor work to reclaim buffers.
659  */
660 static void
661 bufspace_daemon(void)
662 {
663 	for (;;) {
664 		kproc_suspend_check(bufspacedaemonproc);
665 
666 		/*
667 		 * Free buffers from the clean queue until we meet our
668 		 * targets.
669 		 *
670 		 * Theory of operation:  The buffer cache is most efficient
671 		 * when some free buffer headers and space are always
672 		 * available to getnewbuf().  This daemon attempts to prevent
673 		 * the excessive blocking and synchronization associated
674 		 * with shortfall.  It goes through three phases according
675 		 * to demand:
676 		 *
677 		 * 1)	The daemon wakes up voluntarily once per-second
678 		 *	during idle periods when the counters are below
679 		 *	the wakeup thresholds (bufspacethresh, lofreebuffers).
680 		 *
681 		 * 2)	The daemon wakes up as we cross the thresholds
682 		 *	ahead of any potential blocking.  This may bounce
683 		 *	slightly according to the rate of consumption and
684 		 *	release.
685 		 *
686 		 * 3)	The daemon and consumers are starved for working
687 		 *	clean buffers.  This is the 'bufspace' sleep below
688 		 *	which will inefficiently trade bufs with bqrelse
689 		 *	until we return to condition 2.
690 		 */
691 		while (bufspace > lobufspace ||
692 		    numfreebuffers < hifreebuffers) {
693 			if (buf_recycle(false) != 0) {
694 				atomic_set_int(&needsbuffer, 1);
695 				if (buf_recycle(false) != 0) {
696 					rw_wlock(&nblock);
697 					if (needsbuffer)
698 						rw_sleep(__DEVOLATILE(void *,
699 						    &needsbuffer), &nblock,
700 						    PRIBIO|PDROP, "bufspace",
701 						    hz/10);
702 					else
703 						rw_wunlock(&nblock);
704 				}
705 			}
706 			maybe_yield();
707 		}
708 
709 		/*
710 		 * Re-check our limits under the exclusive nblock.
711 		 */
712 		rw_wlock(&nblock);
713 		if (bufspace < bufspacethresh &&
714 		    numfreebuffers > lofreebuffers) {
715 			bufspace_request = 0;
716 			rw_sleep(&bufspace_request, &nblock, PRIBIO|PDROP,
717 			    "-", hz);
718 		} else
719 			rw_wunlock(&nblock);
720 	}
721 }
722 
723 static struct kproc_desc bufspace_kp = {
724 	"bufspacedaemon",
725 	bufspace_daemon,
726 	&bufspacedaemonproc
727 };
728 SYSINIT(bufspacedaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start,
729     &bufspace_kp);
730 
731 /*
732  *	bufmallocadjust:
733  *
734  *	Adjust the reported bufspace for a malloc managed buffer, possibly
735  *	waking any waiters.
736  */
737 static void
738 bufmallocadjust(struct buf *bp, int bufsize)
739 {
740 	int diff;
741 
742 	KASSERT((bp->b_flags & B_MALLOC) != 0,
743 	    ("bufmallocadjust: non-malloc buf %p", bp));
744 	diff = bufsize - bp->b_bufsize;
745 	if (diff < 0)
746 		atomic_subtract_long(&bufmallocspace, -diff);
747 	else
748 		atomic_add_long(&bufmallocspace, diff);
749 	bp->b_bufsize = bufsize;
750 }
751 
752 /*
753  *	runningwakeup:
754  *
755  *	Wake up processes that are waiting on asynchronous writes to fall
756  *	below lorunningspace.
757  */
758 static void
759 runningwakeup(void)
760 {
761 
762 	mtx_lock(&rbreqlock);
763 	if (runningbufreq) {
764 		runningbufreq = 0;
765 		wakeup(&runningbufreq);
766 	}
767 	mtx_unlock(&rbreqlock);
768 }
769 
770 /*
771  *	runningbufwakeup:
772  *
773  *	Decrement the outstanding write count accordingly.
774  */
775 void
776 runningbufwakeup(struct buf *bp)
777 {
778 	long space, bspace;
779 
780 	bspace = bp->b_runningbufspace;
781 	if (bspace == 0)
782 		return;
783 	space = atomic_fetchadd_long(&runningbufspace, -bspace);
784 	KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld",
785 	    space, bspace));
786 	bp->b_runningbufspace = 0;
787 	/*
788 	 * Only acquire the lock and wakeup on the transition from exceeding
789 	 * the threshold to falling below it.
790 	 */
791 	if (space < lorunningspace)
792 		return;
793 	if (space - bspace > lorunningspace)
794 		return;
795 	runningwakeup();
796 }
797 
798 /*
799  *	waitrunningbufspace()
800  *
801  *	runningbufspace is a measure of the amount of I/O currently
802  *	running.  This routine is used in async-write situations to
803  *	prevent creating huge backups of pending writes to a device.
804  *	Only asynchronous writes are governed by this function.
805  *
806  *	This does NOT turn an async write into a sync write.  It waits
807  *	for earlier writes to complete and generally returns before the
808  *	caller's write has reached the device.
809  */
810 void
811 waitrunningbufspace(void)
812 {
813 
814 	mtx_lock(&rbreqlock);
815 	while (runningbufspace > hirunningspace) {
816 		runningbufreq = 1;
817 		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
818 	}
819 	mtx_unlock(&rbreqlock);
820 }
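
/*
 * Illustrative accounting sequence for an async write (a sketch; the real
 * call sites are bufwrite() below and the I/O completion path):
 *
 *	bp->b_runningbufspace = bp->b_bufsize;
 *	atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);
 *	bstrategy(bp);			-- start the write
 *	...
 *	runningbufwakeup(bp);		-- on completion: subtract and wake
 *
 * Heavy producers of async writes call waitrunningbufspace() beforehand to
 * throttle against hirunningspace.
 */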
821 
822 
823 /*
824  *	vfs_buf_test_cache:
825  *
826  *	Called when a buffer is extended.  This function clears the B_CACHE
827  *	bit if the newly extended portion of the buffer does not contain
828  *	valid data.
829  */
830 static __inline void
831 vfs_buf_test_cache(struct buf *bp, vm_ooffset_t foff, vm_offset_t off,
832     vm_offset_t size, vm_page_t m)
833 {
834 
835 	VM_OBJECT_ASSERT_LOCKED(m->object);
836 	if (bp->b_flags & B_CACHE) {
837 		int base = (foff + off) & PAGE_MASK;
838 		if (vm_page_is_valid(m, base, size) == 0)
839 			bp->b_flags &= ~B_CACHE;
840 	}
841 }
842 
843 /* Wake up the buffer daemon if necessary */
844 static __inline void
845 bd_wakeup(void)
846 {
847 
848 	mtx_lock(&bdlock);
849 	if (bd_request == 0) {
850 		bd_request = 1;
851 		wakeup(&bd_request);
852 	}
853 	mtx_unlock(&bdlock);
854 }
855 
856 /*
857  * bd_speedup - speedup the buffer cache flushing code
858  */
859 void
860 bd_speedup(void)
861 {
862 	int needwake;
863 
864 	mtx_lock(&bdlock);
865 	needwake = 0;
866 	if (bd_speedupreq == 0 || bd_request == 0)
867 		needwake = 1;
868 	bd_speedupreq = 1;
869 	bd_request = 1;
870 	if (needwake)
871 		wakeup(&bd_request);
872 	mtx_unlock(&bdlock);
873 }
874 
875 #ifndef NSWBUF_MIN
876 #define	NSWBUF_MIN	16
877 #endif
878 
879 #ifdef __i386__
880 #define	TRANSIENT_DENOM	5
881 #else
882 #define	TRANSIENT_DENOM 10
883 #endif
884 
885 /*
886  * Calculate buffer cache scaling values and reserve space for buffer
887  * headers.  This is called during low level kernel initialization and
888  * may be called more than once.  We CANNOT write to the memory area
889  * being reserved at this time.
890  */
891 caddr_t
892 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
893 {
894 	int tuned_nbuf;
895 	long maxbuf, maxbuf_sz, buf_sz,	biotmap_sz;
896 
897 	/*
898 	 * physmem_est is in pages.  Convert it to kilobytes (assumes
899 	 * PAGE_SIZE is >= 1K)
900 	 */
901 	physmem_est = physmem_est * (PAGE_SIZE / 1024);
902 
903 	/*
904 	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
905 	 * For the first 64MB of ram nominally allocate sufficient buffers to
906 	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
907 	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
908 	 * the buffer cache we limit the eventual kva reservation to
909 	 * maxbcache bytes.
910 	 *
911 	 * factor represents the 1/4 x ram conversion.
912 	 */
913 	if (nbuf == 0) {
914 		int factor = 4 * BKVASIZE / 1024;
915 
916 		nbuf = 50;
917 		if (physmem_est > 4096)
918 			nbuf += min((physmem_est - 4096) / factor,
919 			    65536 / factor);
920 		if (physmem_est > 65536)
921 			nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
922 			    32 * 1024 * 1024 / (factor * 5));
923 
924 		if (maxbcache && nbuf > maxbcache / BKVASIZE)
925 			nbuf = maxbcache / BKVASIZE;
926 		tuned_nbuf = 1;
927 	} else
928 		tuned_nbuf = 0;
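
	/*
	 * Rough worked example (illustrative only, assuming BKVASIZE is
	 * 16 KiB, so factor == 64, and maxbcache is unset): with 4 GiB of
	 * RAM, physmem_est is 4194304 KiB, so nbuf = 50 + 1024 + 25804 =
	 * 26878, or roughly 420 MiB of buffer KVA before the clamps below.
	 */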
929 
930 	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
931 	maxbuf = (LONG_MAX / 3) / BKVASIZE;
932 	if (nbuf > maxbuf) {
933 		if (!tuned_nbuf)
934 			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
935 			    maxbuf);
936 		nbuf = maxbuf;
937 	}
938 
939 	/*
940 	 * Ideal allocation size for the transient bio submap is 10%
941 	 * of the maximal space buffer map.  This roughly corresponds
942 	 * to the amount of the buffer mapped for typical UFS load.
943 	 *
944 	 * Clip the buffer map to reserve space for the transient
945 	 * BIOs, if its extent is bigger than 90% (80% on i386) of the
946 	 * maximum buffer map extent on the platform.
947 	 *
948 	 * Falling back to maxbuf when maxbcache is unset allows the buffer
949 	 * KVA to remain untrimmed on architectures with ample KVA space.
951 	 */
952 	if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
953 		maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
954 		buf_sz = (long)nbuf * BKVASIZE;
955 		if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
956 		    (TRANSIENT_DENOM - 1)) {
957 			/*
958 			 * There is more KVA than memory.  Do not
959 			 * adjust buffer map size, and assign the rest
960 			 * of maxbuf to transient map.
961 			 */
962 			biotmap_sz = maxbuf_sz - buf_sz;
963 		} else {
964 			/*
965 			 * Buffer map spans all KVA we could afford on
966 			 * this platform.  Give 10% (20% on i386) of
967 			 * the buffer map to the transient bio map.
968 			 */
969 			biotmap_sz = buf_sz / TRANSIENT_DENOM;
970 			buf_sz -= biotmap_sz;
971 		}
972 		if (biotmap_sz / INT_MAX > MAXPHYS)
973 			bio_transient_maxcnt = INT_MAX;
974 		else
975 			bio_transient_maxcnt = biotmap_sz / MAXPHYS;
976 		/*
977 		 * Artificially limit to 1024 simultaneous in-flight I/Os
978 		 * using the transient mapping.
979 		 */
980 		if (bio_transient_maxcnt > 1024)
981 			bio_transient_maxcnt = 1024;
982 		if (tuned_nbuf)
983 			nbuf = buf_sz / BKVASIZE;
984 	}
985 
986 	/*
987 	 * swbufs are used as temporary holders for I/O, such as paging I/O.
988 	 * We have no fewer than 16 and no more than 256.
989 	 */
990 	nswbuf = min(nbuf / 4, 256);
991 	TUNABLE_INT_FETCH("kern.nswbuf", &nswbuf);
992 	if (nswbuf < NSWBUF_MIN)
993 		nswbuf = NSWBUF_MIN;
994 
995 	/*
996 	 * Reserve space for the buffer cache buffers
997 	 */
998 	swbuf = (void *)v;
999 	v = (caddr_t)(swbuf + nswbuf);
1000 	buf = (void *)v;
1001 	v = (caddr_t)(buf + nbuf);
1002 
1003 	return(v);
1004 }
1005 
1006 /* Initialize the buffer subsystem.  Called before use of any buffers. */
1007 void
1008 bufinit(void)
1009 {
1010 	struct buf *bp;
1011 	int i;
1012 
1013 	CTASSERT(MAXBCACHEBUF >= MAXBSIZE);
1014 	mtx_init(&bqlocks[QUEUE_DIRTY], "bufq dirty lock", NULL, MTX_DEF);
1015 	mtx_init(&bqlocks[QUEUE_EMPTY], "bufq empty lock", NULL, MTX_DEF);
1016 	for (i = QUEUE_CLEAN; i < QUEUE_CLEAN + CLEAN_QUEUES; i++)
1017 		mtx_init(&bqlocks[i], "bufq clean lock", NULL, MTX_DEF);
1018 	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
1019 	rw_init(&nblock, "needsbuffer lock");
1020 	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
1021 	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
1022 
1023 	/* next, make a null set of free lists */
1024 	for (i = 0; i < BUFFER_QUEUES; i++)
1025 		TAILQ_INIT(&bufqueues[i]);
1026 
1027 	unmapped_buf = (caddr_t)kva_alloc(MAXPHYS);
1028 
1029 	/* finally, initialize each buffer header and stick on empty q */
1030 	for (i = 0; i < nbuf; i++) {
1031 		bp = &buf[i];
1032 		bzero(bp, sizeof *bp);
1033 		bp->b_flags = B_INVAL;
1034 		bp->b_rcred = NOCRED;
1035 		bp->b_wcred = NOCRED;
1036 		bp->b_qindex = QUEUE_EMPTY;
1037 		bp->b_xflags = 0;
1038 		bp->b_data = bp->b_kvabase = unmapped_buf;
1039 		LIST_INIT(&bp->b_dep);
1040 		BUF_LOCKINIT(bp);
1041 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
1042 #ifdef INVARIANTS
1043 		bq_len[QUEUE_EMPTY]++;
1044 #endif
1045 	}
1046 
1047 	/*
1048 	 * maxbufspace is the absolute maximum amount of buffer space we are
1049 	 * allowed to reserve in KVM and in real terms.  The absolute maximum
1050 	 * is nominally used by metadata.  hibufspace is the nominal maximum
1051 	 * used by most other requests.  The differential is required to
1052 	 * ensure that metadata deadlocks don't occur.
1053 	 *
1054 	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
1055 	 * this may result in KVM fragmentation which is not handled optimally
1056 	 * by the system. XXX This is less true with vmem.  We could use
1057 	 * PAGE_SIZE.
1058 	 */
1059 	maxbufspace = (long)nbuf * BKVASIZE;
1060 	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - MAXBCACHEBUF * 10);
1061 	lobufspace = (hibufspace / 20) * 19; /* 95% */
1062 	bufspacethresh = lobufspace + (hibufspace - lobufspace) / 2;
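
	/*
	 * Illustrative sizing (a sketch, assuming BKVASIZE is 16 KiB and
	 * MAXBCACHEBUF is 64 KiB): with nbuf = 32768, maxbufspace is
	 * 512 MiB, hibufspace ~511 MiB, lobufspace ~486 MiB and
	 * bufspacethresh ~499 MiB.
	 */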
1063 
1064 	/*
1065 	 * Note: The 16 MiB upper limit for hirunningspace was chosen
1066 	 * arbitrarily and may need further tuning. It corresponds to
1067 	 * 128 outstanding write IO requests (if IO size is 128 KiB),
1068 	 * which fits with many RAID controllers' tagged queuing limits.
1069 	 * The lower 1 MiB limit is the historical upper limit for
1070 	 * hirunningspace.
1071 	 */
1072 	hirunningspace = lmax(lmin(roundup(hibufspace / 64, MAXBCACHEBUF),
1073 	    16 * 1024 * 1024), 1024 * 1024);
1074 	lorunningspace = roundup((hirunningspace * 2) / 3, MAXBCACHEBUF);
1075 
1076 	/*
1077 	 * Limit the amount of malloc memory since it is wired permanently into
1078 	 * the kernel space.  Even though this is accounted for in the buffer
1079 	 * allocation, we don't want the malloced region to grow uncontrolled.
1080 	 * The malloc scheme improves memory utilization significantly on
1081 	 * average (small) directories.
1082 	 */
1083 	maxbufmallocspace = hibufspace / 20;
1084 
1085 	/*
1086 	 * Reduce the chance of a deadlock occurring by limiting the number
1087 	 * of delayed-write dirty buffers we allow to stack up.
1088 	 */
1089 	hidirtybuffers = nbuf / 4 + 20;
1090 	dirtybufthresh = hidirtybuffers * 9 / 10;
1091 	numdirtybuffers = 0;
1092 	/*
1093 	 * To support extreme low-memory systems, make sure hidirtybuffers
1094 	 * cannot eat up all available buffer space.  This occurs when our
1095 	 * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
1096 	 * buffer space assuming BKVASIZE'd buffers.
1097 	 */
1098 	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
1099 		hidirtybuffers >>= 1;
1100 	}
1101 	lodirtybuffers = hidirtybuffers / 2;
1102 
1103 	/*
1104 	 * lofreebuffers should be sufficient to avoid stalling waiting on
1105 	 * buf headers under heavy utilization.  The bufs in per-cpu caches
1106 	 * are counted as free but will be unavailable to threads executing
1107 	 * on other cpus.
1108 	 *
1109 	 * hifreebuffers is the free target for the bufspace daemon.  This
1110 	 * should be set appropriately to limit work per-iteration.
1111 	 */
1112 	lofreebuffers = MIN((nbuf / 25) + (20 * mp_ncpus), 128 * mp_ncpus);
1113 	hifreebuffers = (3 * lofreebuffers) / 2;
1114 	numfreebuffers = nbuf;
1115 
1116 	bogus_page = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ |
1117 	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
1118 
1119 	/* Setup the kva and free list allocators. */
1120 	vmem_set_reclaim(buffer_arena, bufkva_reclaim);
1121 	buf_zone = uma_zcache_create("buf free cache", sizeof(struct buf),
1122 	    NULL, NULL, NULL, NULL, buf_import, buf_release, NULL, 0);
1123 
1124 	/*
1125 	 * Size the clean queue according to the amount of buffer space.
1126 	 * One queue per 256 MB up to the max.  More queues give better
1127 	 * concurrency but less accurate LRU.
1128 	 */
1129 	clean_queues = MIN(howmany(maxbufspace, 256*1024*1024), CLEAN_QUEUES);
1130 
1131 }
1132 
1133 #ifdef INVARIANTS
1134 static inline void
1135 vfs_buf_check_mapped(struct buf *bp)
1136 {
1137 
1138 	KASSERT(bp->b_kvabase != unmapped_buf,
1139 	    ("mapped buf: b_kvabase was not updated %p", bp));
1140 	KASSERT(bp->b_data != unmapped_buf,
1141 	    ("mapped buf: b_data was not updated %p", bp));
1142 	KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf +
1143 	    MAXPHYS, ("b_data + b_offset unmapped %p", bp));
1144 }
1145 
1146 static inline void
1147 vfs_buf_check_unmapped(struct buf *bp)
1148 {
1149 
1150 	KASSERT(bp->b_data == unmapped_buf,
1151 	    ("unmapped buf: corrupted b_data %p", bp));
1152 }
1153 
1154 #define	BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
1155 #define	BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
1156 #else
1157 #define	BUF_CHECK_MAPPED(bp) do {} while (0)
1158 #define	BUF_CHECK_UNMAPPED(bp) do {} while (0)
1159 #endif
1160 
1161 static int
1162 isbufbusy(struct buf *bp)
1163 {
1164 	if (((bp->b_flags & (B_INVAL | B_PERSISTENT)) == 0 &&
1165 	    BUF_ISLOCKED(bp)) ||
1166 	    ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
1167 		return (1);
1168 	return (0);
1169 }
1170 
1171 /*
1172  * Shutdown the system cleanly to prepare for reboot, halt, or power off.
1173  */
1174 void
1175 bufshutdown(int show_busybufs)
1176 {
1177 	static int first_buf_printf = 1;
1178 	struct buf *bp;
1179 	int iter, nbusy, pbusy;
1180 #ifndef PREEMPTION
1181 	int subiter;
1182 #endif
1183 
1184 	/*
1185 	 * Sync filesystems for shutdown
1186 	 */
1187 	wdog_kern_pat(WD_LASTVAL);
1188 	sys_sync(curthread, NULL);
1189 
1190 	/*
1191 	 * With soft updates, some buffers that are
1192 	 * written will be remarked as dirty until other
1193 	 * buffers are written.
1194 	 */
1195 	for (iter = pbusy = 0; iter < 20; iter++) {
1196 		nbusy = 0;
1197 		for (bp = &buf[nbuf]; --bp >= buf; )
1198 			if (isbufbusy(bp))
1199 				nbusy++;
1200 		if (nbusy == 0) {
1201 			if (first_buf_printf)
1202 				printf("All buffers synced.");
1203 			break;
1204 		}
1205 		if (first_buf_printf) {
1206 			printf("Syncing disks, buffers remaining... ");
1207 			first_buf_printf = 0;
1208 		}
1209 		printf("%d ", nbusy);
1210 		if (nbusy < pbusy)
1211 			iter = 0;
1212 		pbusy = nbusy;
1213 
1214 		wdog_kern_pat(WD_LASTVAL);
1215 		sys_sync(curthread, NULL);
1216 
1217 #ifdef PREEMPTION
1218 		/*
1219 		 * Drop Giant and spin for a while to allow
1220 		 * interrupt threads to run.
1221 		 */
1222 		DROP_GIANT();
1223 		DELAY(50000 * iter);
1224 		PICKUP_GIANT();
1225 #else
1226 		/*
1227 		 * Drop Giant and context switch several times to
1228 		 * allow interrupt threads to run.
1229 		 */
1230 		DROP_GIANT();
1231 		for (subiter = 0; subiter < 50 * iter; subiter++) {
1232 			thread_lock(curthread);
1233 			mi_switch(SW_VOL, NULL);
1234 			thread_unlock(curthread);
1235 			DELAY(1000);
1236 		}
1237 		PICKUP_GIANT();
1238 #endif
1239 	}
1240 	printf("\n");
1241 	/*
1242 	 * Count only busy local buffers to prevent forcing
1243 	 * a fsck if we're just a client of a wedged NFS server
1244 	 */
1245 	nbusy = 0;
1246 	for (bp = &buf[nbuf]; --bp >= buf; ) {
1247 		if (isbufbusy(bp)) {
1248 #if 0
1249 /* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
1250 			if (bp->b_dev == NULL) {
1251 				TAILQ_REMOVE(&mountlist,
1252 				    bp->b_vp->v_mount, mnt_list);
1253 				continue;
1254 			}
1255 #endif
1256 			nbusy++;
1257 			if (show_busybufs > 0) {
1258 				printf(
1259 	    "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
1260 				    nbusy, bp, bp->b_vp, bp->b_flags,
1261 				    (intmax_t)bp->b_blkno,
1262 				    (intmax_t)bp->b_lblkno);
1263 				BUF_LOCKPRINTINFO(bp);
1264 				if (show_busybufs > 1)
1265 					vn_printf(bp->b_vp,
1266 					    "vnode content: ");
1267 			}
1268 		}
1269 	}
1270 	if (nbusy) {
1271 		/*
1272 		 * Failed to sync all blocks. Indicate this and don't
1273 		 * unmount filesystems (thus forcing an fsck on reboot).
1274 		 */
1275 		printf("Giving up on %d buffers\n", nbusy);
1276 		DELAY(5000000);	/* 5 seconds */
1277 	} else {
1278 		if (!first_buf_printf)
1279 			printf("Final sync complete\n");
1280 		/*
1281 		 * Unmount filesystems
1282 		 */
1283 		if (panicstr == 0)
1284 			vfs_unmountall();
1285 	}
1286 	swapoff_all();
1287 	DELAY(100000);		/* wait for console output to finish */
1288 }
1289 
1290 static void
1291 bpmap_qenter(struct buf *bp)
1292 {
1293 
1294 	BUF_CHECK_MAPPED(bp);
1295 
1296 	/*
1297 	 * bp->b_data is relative to bp->b_offset, but
1298 	 * bp->b_offset may be offset into the first page.
1299 	 */
1300 	bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
1301 	pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
1302 	bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
1303 	    (vm_offset_t)(bp->b_offset & PAGE_MASK));
1304 }
1305 
1306 /*
1307  *	binsfree:
1308  *
1309  *	Insert the buffer into the appropriate free list.
1310  */
1311 static void
1312 binsfree(struct buf *bp, int qindex)
1313 {
1314 	struct mtx *olock, *nlock;
1315 
1316 	if (qindex != QUEUE_EMPTY) {
1317 		BUF_ASSERT_XLOCKED(bp);
1318 	}
1319 
1320 	/*
1321 	 * Stick to the same clean queue for the lifetime of the buf to
1322 	 * limit locking below.  Otherwise pick one sequentially.
1323 	 */
1324 	if (qindex == QUEUE_CLEAN) {
1325 		if (bqisclean(bp->b_qindex))
1326 			qindex = bp->b_qindex;
1327 		else
1328 			qindex = bqcleanq();
1329 	}
1330 
1331 	/*
1332 	 * Handle delayed bremfree() processing.
1333 	 */
1334 	nlock = bqlock(qindex);
1335 	if (bp->b_flags & B_REMFREE) {
1336 		olock = bqlock(bp->b_qindex);
1337 		mtx_lock(olock);
1338 		bremfreel(bp);
1339 		if (olock != nlock) {
1340 			mtx_unlock(olock);
1341 			mtx_lock(nlock);
1342 		}
1343 	} else
1344 		mtx_lock(nlock);
1345 
1346 	if (bp->b_qindex != QUEUE_NONE)
1347 		panic("binsfree: free buffer onto another queue???");
1348 
1349 	bp->b_qindex = qindex;
1350 	if (bp->b_flags & B_AGE)
1351 		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
1352 	else
1353 		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
1354 #ifdef INVARIANTS
1355 	bq_len[bp->b_qindex]++;
1356 #endif
1357 	mtx_unlock(nlock);
1358 }
1359 
1360 /*
1361  * buf_free:
1362  *
1363  *	Free a buffer to the buf zone once it no longer has valid contents.
1364  */
1365 static void
1366 buf_free(struct buf *bp)
1367 {
1368 
1369 	if (bp->b_flags & B_REMFREE)
1370 		bremfreef(bp);
1371 	if (bp->b_vflags & BV_BKGRDINPROG)
1372 		panic("losing buffer 1");
1373 	if (bp->b_rcred != NOCRED) {
1374 		crfree(bp->b_rcred);
1375 		bp->b_rcred = NOCRED;
1376 	}
1377 	if (bp->b_wcred != NOCRED) {
1378 		crfree(bp->b_wcred);
1379 		bp->b_wcred = NOCRED;
1380 	}
1381 	if (!LIST_EMPTY(&bp->b_dep))
1382 		buf_deallocate(bp);
1383 	bufkva_free(bp);
1384 	BUF_UNLOCK(bp);
1385 	uma_zfree(buf_zone, bp);
1386 	atomic_add_int(&numfreebuffers, 1);
1387 	bufspace_wakeup();
1388 }
1389 
1390 /*
1391  * buf_import:
1392  *
1393  *	Import bufs into the uma cache from the buf list.  The system still
1394  *	expects a static array of bufs and much of the synchronization
1395  *	around bufs assumes type stable storage.  As a result, UMA is used
1396  *	only as a per-cpu cache of bufs still maintained on a global list.
1397  */
1398 static int
1399 buf_import(void *arg, void **store, int cnt, int flags)
1400 {
1401 	struct buf *bp;
1402 	int i;
1403 
1404 	mtx_lock(&bqlocks[QUEUE_EMPTY]);
1405 	for (i = 0; i < cnt; i++) {
1406 		bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
1407 		if (bp == NULL)
1408 			break;
1409 		bremfreel(bp);
1410 		store[i] = bp;
1411 	}
1412 	mtx_unlock(&bqlocks[QUEUE_EMPTY]);
1413 
1414 	return (i);
1415 }
1416 
1417 /*
1418  * buf_release:
1419  *
1420  *	Release bufs from the uma cache back to the buffer queues.
1421  */
1422 static void
1423 buf_release(void *arg, void **store, int cnt)
1424 {
1425 	int i;
1426 
1427 	for (i = 0; i < cnt; i++)
1428 		binsfree(store[i], QUEUE_EMPTY);
1429 }
1430 
1431 /*
1432  * buf_alloc:
1433  *
1434  *	Allocate an empty buffer header.
1435  */
1436 static struct buf *
1437 buf_alloc(void)
1438 {
1439 	struct buf *bp;
1440 
1441 	bp = uma_zalloc(buf_zone, M_NOWAIT);
1442 	if (bp == NULL) {
1443 		bufspace_daemonwakeup();
1444 		atomic_add_int(&numbufallocfails, 1);
1445 		return (NULL);
1446 	}
1447 
1448 	/*
1449 	 * Wake-up the bufspace daemon on transition.
1450 	 */
1451 	if (atomic_fetchadd_int(&numfreebuffers, -1) == lofreebuffers)
1452 		bufspace_daemonwakeup();
1453 
1454 	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1455 		panic("buf_alloc: Locked buf %p on free queue.", bp);
1456 
1457 	KASSERT(bp->b_vp == NULL,
1458 	    ("bp: %p still has vnode %p.", bp, bp->b_vp));
1459 	KASSERT((bp->b_flags & (B_DELWRI | B_NOREUSE)) == 0,
1460 	    ("invalid buffer %p flags %#x", bp, bp->b_flags));
1461 	KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
1462 	    ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
1463 	KASSERT(bp->b_npages == 0,
1464 	    ("bp: %p still has %d vm pages\n", bp, bp->b_npages));
1465 	KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp));
1466 	KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp));
1467 
1468 	bp->b_flags = 0;
1469 	bp->b_ioflags = 0;
1470 	bp->b_xflags = 0;
1471 	bp->b_vflags = 0;
1472 	bp->b_vp = NULL;
1473 	bp->b_blkno = bp->b_lblkno = 0;
1474 	bp->b_offset = NOOFFSET;
1475 	bp->b_iodone = 0;
1476 	bp->b_error = 0;
1477 	bp->b_resid = 0;
1478 	bp->b_bcount = 0;
1479 	bp->b_npages = 0;
1480 	bp->b_dirtyoff = bp->b_dirtyend = 0;
1481 	bp->b_bufobj = NULL;
1482 	bp->b_pin_count = 0;
1483 	bp->b_data = bp->b_kvabase = unmapped_buf;
1484 	bp->b_fsprivate1 = NULL;
1485 	bp->b_fsprivate2 = NULL;
1486 	bp->b_fsprivate3 = NULL;
1487 	LIST_INIT(&bp->b_dep);
1488 
1489 	return (bp);
1490 }
1491 
1492 /*
1493  *	buf_qrecycle:
1494  *
1495  *	Free a buffer from the given bufqueue.  kva controls whether the
1496  *	freed buf must own some kva resources.  This is used for
1497  *	defragmenting.
1498  */
1499 static int
1500 buf_qrecycle(int qindex, bool kva)
1501 {
1502 	struct buf *bp, *nbp;
1503 
1504 	if (kva)
1505 		atomic_add_int(&bufdefragcnt, 1);
1506 	nbp = NULL;
1507 	mtx_lock(&bqlocks[qindex]);
1508 	nbp = TAILQ_FIRST(&bufqueues[qindex]);
1509 
1510 	/*
1511 	 * Run the scan, possibly freeing data and/or kva mappings on the
1512 	 * fly, depending on whether we are defragmenting.
1513 	 */
1514 	while ((bp = nbp) != NULL) {
1515 		/*
1516 		 * Calculate next bp (we can only use it if we do not
1517 		 * release the bqlock).
1518 		 */
1519 		nbp = TAILQ_NEXT(bp, b_freelist);
1520 
1521 		/*
1522 		 * If we are defragging then we need a buffer with
1523 		 * some kva to reclaim.
1524 		 */
1525 		if (kva && bp->b_kvasize == 0)
1526 			continue;
1527 
1528 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1529 			continue;
1530 
1531 		/*
1532 		 * Skip buffers with background writes in progress.
1533 		 */
1534 		if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
1535 			BUF_UNLOCK(bp);
1536 			continue;
1537 		}
1538 
1539 		KASSERT(bp->b_qindex == qindex,
1540 		    ("buf_qrecycle: inconsistent queue %d bp %p", qindex, bp));
1541 		/*
1542 		 * NOTE:  nbp is now entirely invalid.  We can only restart
1543 		 * the scan from this point on.
1544 		 */
1545 		bremfreel(bp);
1546 		mtx_unlock(&bqlocks[qindex]);
1547 
1548 		/*
1549 		 * Requeue the background write buffer with error and
1550 		 * restart the scan.
1551 		 */
1552 		if ((bp->b_vflags & BV_BKGRDERR) != 0) {
1553 			bqrelse(bp);
1554 			mtx_lock(&bqlocks[qindex]);
1555 			nbp = TAILQ_FIRST(&bufqueues[qindex]);
1556 			continue;
1557 		}
1558 		bp->b_flags |= B_INVAL;
1559 		brelse(bp);
1560 		return (0);
1561 	}
1562 	mtx_unlock(&bqlocks[qindex]);
1563 
1564 	return (ENOBUFS);
1565 }
1566 
1567 /*
1568  *	buf_recycle:
1569  *
1570  *	Iterate through all clean queues until we find a buf to recycle or
1571  *	exhaust the search.
1572  */
1573 static int
1574 buf_recycle(bool kva)
1575 {
1576 	int qindex, first_qindex;
1577 
1578 	qindex = first_qindex = bqcleanq();
1579 	do {
1580 		if (buf_qrecycle(qindex, kva) == 0)
1581 			return (0);
1582 		if (++qindex == QUEUE_CLEAN + clean_queues)
1583 			qindex = QUEUE_CLEAN;
1584 	} while (qindex != first_qindex);
1585 
1586 	return (ENOBUFS);
1587 }
1588 
1589 /*
1590  *	buf_scan:
1591  *
1592  *	Scan the clean queues looking for a buffer to recycle.  needsbuffer
1593  *	is set on failure so that the caller may optionally bufspace_wait()
1594  *	in a race-free fashion.
1595  */
1596 static int
1597 buf_scan(bool defrag)
1598 {
1599 	int error;
1600 
1601 	/*
1602 	 * To avoid heavy synchronization and wakeup races we set
1603 	 * needsbuffer and re-poll before failing.  This ensures that
1604 	 * no frees can be missed between an unsuccessful poll and
1605 	 * going to sleep in a synchronized fashion.
1606 	 */
1607 	if ((error = buf_recycle(defrag)) != 0) {
1608 		atomic_set_int(&needsbuffer, 1);
1609 		bufspace_daemonwakeup();
1610 		error = buf_recycle(defrag);
1611 	}
1612 	if (error == 0)
1613 		atomic_add_int(&getnewbufrestarts, 1);
1614 	return (error);
1615 }
1616 
1617 /*
1618  *	bremfree:
1619  *
1620  *	Mark the buffer for removal from the appropriate free list.
1621  *
1622  */
1623 void
1624 bremfree(struct buf *bp)
1625 {
1626 
1627 	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1628 	KASSERT((bp->b_flags & B_REMFREE) == 0,
1629 	    ("bremfree: buffer %p already marked for delayed removal.", bp));
1630 	KASSERT(bp->b_qindex != QUEUE_NONE,
1631 	    ("bremfree: buffer %p not on a queue.", bp));
1632 	BUF_ASSERT_XLOCKED(bp);
1633 
1634 	bp->b_flags |= B_REMFREE;
1635 }
1636 
1637 /*
1638  *	bremfreef:
1639  *
1640  *	Force an immediate removal from a free list.  Used only in nfs when
1641  *	it abuses the b_freelist pointer.
1642  */
1643 void
1644 bremfreef(struct buf *bp)
1645 {
1646 	struct mtx *qlock;
1647 
1648 	qlock = bqlock(bp->b_qindex);
1649 	mtx_lock(qlock);
1650 	bremfreel(bp);
1651 	mtx_unlock(qlock);
1652 }
1653 
1654 /*
1655  *	bremfreel:
1656  *
1657  *	Removes a buffer from the free list, must be called with the
1658  *	correct qlock held.
1659  */
1660 static void
1661 bremfreel(struct buf *bp)
1662 {
1663 
1664 	CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
1665 	    bp, bp->b_vp, bp->b_flags);
1666 	KASSERT(bp->b_qindex != QUEUE_NONE,
1667 	    ("bremfreel: buffer %p not on a queue.", bp));
1668 	if (bp->b_qindex != QUEUE_EMPTY) {
1669 		BUF_ASSERT_XLOCKED(bp);
1670 	}
1671 	mtx_assert(bqlock(bp->b_qindex), MA_OWNED);
1672 
1673 	TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
1674 #ifdef INVARIANTS
1675 	KASSERT(bq_len[bp->b_qindex] >= 1, ("queue %d underflow",
1676 	    bp->b_qindex));
1677 	bq_len[bp->b_qindex]--;
1678 #endif
1679 	bp->b_qindex = QUEUE_NONE;
1680 	bp->b_flags &= ~B_REMFREE;
1681 }
1682 
1683 /*
1684  *	bufkva_free:
1685  *
1686  *	Free the kva allocation for a buffer.
1687  *
1688  */
1689 static void
1690 bufkva_free(struct buf *bp)
1691 {
1692 
1693 #ifdef INVARIANTS
1694 	if (bp->b_kvasize == 0) {
1695 		KASSERT(bp->b_kvabase == unmapped_buf &&
1696 		    bp->b_data == unmapped_buf,
1697 		    ("Leaked KVA space on %p", bp));
1698 	} else if (buf_mapped(bp))
1699 		BUF_CHECK_MAPPED(bp);
1700 	else
1701 		BUF_CHECK_UNMAPPED(bp);
1702 #endif
1703 	if (bp->b_kvasize == 0)
1704 		return;
1705 
1706 	vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize);
1707 	atomic_subtract_long(&bufkvaspace, bp->b_kvasize);
1708 	atomic_add_int(&buffreekvacnt, 1);
1709 	bp->b_data = bp->b_kvabase = unmapped_buf;
1710 	bp->b_kvasize = 0;
1711 }
1712 
1713 /*
1714  *	bufkva_alloc:
1715  *
1716  *	Allocate the buffer KVA and set b_kvasize and b_kvabase.
1717  */
1718 static int
1719 bufkva_alloc(struct buf *bp, int maxsize, int gbflags)
1720 {
1721 	vm_offset_t addr;
1722 	int error;
1723 
1724 	KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
1725 	    ("Invalid gbflags 0x%x in %s", gbflags, __func__));
1726 
1727 	bufkva_free(bp);
1728 
1729 	addr = 0;
1730 	error = vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr);
1731 	if (error != 0) {
1732 		/*
1733 		 * Buffer map is too fragmented.  Request the caller
1734 		 * to defragment the map.
1735 		 */
1736 		return (error);
1737 	}
1738 	bp->b_kvabase = (caddr_t)addr;
1739 	bp->b_kvasize = maxsize;
1740 	atomic_add_long(&bufkvaspace, bp->b_kvasize);
1741 	if ((gbflags & GB_UNMAPPED) != 0) {
1742 		bp->b_data = unmapped_buf;
1743 		BUF_CHECK_UNMAPPED(bp);
1744 	} else {
1745 		bp->b_data = bp->b_kvabase;
1746 		BUF_CHECK_MAPPED(bp);
1747 	}
1748 	return (0);
1749 }
1750 
1751 /*
1752  *	bufkva_reclaim:
1753  *
1754  *	Reclaim buffer kva by freeing buffers holding kva.  This is a vmem
1755  *	callback that fires to avoid returning failure.
1756  */
1757 static void
1758 bufkva_reclaim(vmem_t *vmem, int flags)
1759 {
1760 	int i;
1761 
1762 	for (i = 0; i < 5; i++)
1763 		if (buf_scan(true) != 0)
1764 			break;
1765 	return;
1766 }
1767 
1768 
1769 /*
1770  * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
1771  * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
1772  * the buffer is valid and we do not have to do anything.
1773  */
1774 void
1775 breada(struct vnode * vp, daddr_t * rablkno, int * rabsize,
1776     int cnt, struct ucred * cred)
1777 {
1778 	struct buf *rabp;
1779 	int i;
1780 
1781 	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
1782 		if (inmem(vp, *rablkno))
1783 			continue;
1784 		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
1785 
1786 		if ((rabp->b_flags & B_CACHE) == 0) {
1787 			if (!TD_IS_IDLETHREAD(curthread))
1788 				curthread->td_ru.ru_inblock++;
1789 			rabp->b_flags |= B_ASYNC;
1790 			rabp->b_flags &= ~B_INVAL;
1791 			rabp->b_ioflags &= ~BIO_ERROR;
1792 			rabp->b_iocmd = BIO_READ;
1793 			if (rabp->b_rcred == NOCRED && cred != NOCRED)
1794 				rabp->b_rcred = crhold(cred);
1795 			vfs_busy_pages(rabp, 0);
1796 			BUF_KERNPROC(rabp);
1797 			rabp->b_iooffset = dbtob(rabp->b_blkno);
1798 			bstrategy(rabp);
1799 		} else {
1800 			brelse(rabp);
1801 		}
1802 	}
1803 }
1804 
1805 /*
1806  * Entry point for bread() and breadn() via #defines in sys/buf.h.
1807  *
1808  * Get a buffer with the specified data.  Look in the cache first.  We
1809  * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
1810  * is set, the buffer is valid and we do not have to do anything, see
1811  * getblk(). Also starts asynchronous I/O on read-ahead blocks.
1812  */
1813 int
1814 breadn_flags(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablkno,
1815     int *rabsize, int cnt, struct ucred *cred, int flags, struct buf **bpp)
1816 {
1817 	struct buf *bp;
1818 	int rv = 0, readwait = 0;
1819 
1820 	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
1821 	/*
1822 	 * Can only return NULL if GB_LOCK_NOWAIT flag is specified.
1823 	 */
1824 	*bpp = bp = getblk(vp, blkno, size, 0, 0, flags);
1825 	if (bp == NULL)
1826 		return (EBUSY);
1827 
1828 	/* if not found in cache, do some I/O */
1829 	if ((bp->b_flags & B_CACHE) == 0) {
1830 		if (!TD_IS_IDLETHREAD(curthread))
1831 			curthread->td_ru.ru_inblock++;
1832 		bp->b_iocmd = BIO_READ;
1833 		bp->b_flags &= ~B_INVAL;
1834 		bp->b_ioflags &= ~BIO_ERROR;
1835 		if (bp->b_rcred == NOCRED && cred != NOCRED)
1836 			bp->b_rcred = crhold(cred);
1837 		vfs_busy_pages(bp, 0);
1838 		bp->b_iooffset = dbtob(bp->b_blkno);
1839 		bstrategy(bp);
1840 		++readwait;
1841 	}
1842 
1843 	breada(vp, rablkno, rabsize, cnt, cred);
1844 
1845 	if (readwait) {
1846 		rv = bufwait(bp);
1847 	}
1848 	return (rv);
1849 }
1850 
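/*
 * Illustrative caller-side sketch, not part of the original file: a
 * filesystem typically reads a block through the bread() wrapper (a
 * #define in sys/buf.h on top of breadn_flags(), see above) and releases
 * the buffer once it is done with the data.  The names "lbn", "bsize" and
 * "dst" below are hypothetical.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	bcopy(bp->b_data, dst, bsize);
 *	bqrelse(bp);		(keep it cached, we expect to use it again)
 */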
1851 /*
1852  * Write, release buffer on completion.  (Done by iodone
1853  * if async).  Do not bother writing anything if the buffer
1854  * is invalid.
1855  *
1856  * Note that we set B_CACHE here, indicating that buffer is
1857  * fully valid and thus cacheable.  This is true even of NFS
1858  * now so we set it generally.  This could be set either here
1859  * or in biodone() since the I/O is synchronous.  We put it
1860  * here.
1861  */
1862 int
1863 bufwrite(struct buf *bp)
1864 {
1865 	int oldflags;
1866 	struct vnode *vp;
1867 	long space;
1868 	int vp_md;
1869 
1870 	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1871 	if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) {
1872 		bp->b_flags |= B_INVAL | B_RELBUF;
1873 		bp->b_flags &= ~B_CACHE;
1874 		brelse(bp);
1875 		return (ENXIO);
1876 	}
1877 	if (bp->b_flags & B_INVAL) {
1878 		brelse(bp);
1879 		return (0);
1880 	}
1881 
1882 	if (bp->b_flags & B_BARRIER)
1883 		barrierwrites++;
1884 
1885 	oldflags = bp->b_flags;
1886 
1887 	BUF_ASSERT_HELD(bp);
1888 
1889 	if (bp->b_pin_count > 0)
1890 		bunpin_wait(bp);
1891 
1892 	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
1893 	    ("FFS background buffer should not get here %p", bp));
1894 
1895 	vp = bp->b_vp;
1896 	if (vp)
1897 		vp_md = vp->v_vflag & VV_MD;
1898 	else
1899 		vp_md = 0;
1900 
1901 	/*
1902 	 * Mark the buffer clean.  Increment the bufobj write count
1903 	 * before bundirty() call, to prevent other thread from seeing
1904 	 * empty dirty list and zero counter for writes in progress,
1905 	 * falsely indicating that the bufobj is clean.
1906 	 */
1907 	bufobj_wref(bp->b_bufobj);
1908 	bundirty(bp);
1909 
1910 	bp->b_flags &= ~B_DONE;
1911 	bp->b_ioflags &= ~BIO_ERROR;
1912 	bp->b_flags |= B_CACHE;
1913 	bp->b_iocmd = BIO_WRITE;
1914 
1915 	vfs_busy_pages(bp, 1);
1916 
1917 	/*
1918 	 * Normal bwrites pipeline writes
1919 	 */
1920 	bp->b_runningbufspace = bp->b_bufsize;
1921 	space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);
1922 
1923 	if (!TD_IS_IDLETHREAD(curthread))
1924 		curthread->td_ru.ru_oublock++;
1925 	if (oldflags & B_ASYNC)
1926 		BUF_KERNPROC(bp);
1927 	bp->b_iooffset = dbtob(bp->b_blkno);
1928 	bstrategy(bp);
1929 
1930 	if ((oldflags & B_ASYNC) == 0) {
1931 		int rtval = bufwait(bp);
1932 		brelse(bp);
1933 		return (rtval);
1934 	} else if (space > hirunningspace) {
1935 		/*
1936 		 * don't allow the async write to saturate the I/O
1937 		 * system.  We will not deadlock here because
1938 		 * we are blocking waiting for I/O that is already in progress
1939 		 * to complete.  We do not block here if it is the update
1940 		 * or syncer daemon trying to clean up as that can lead
1941 		 * to deadlock.
1942 		 */
1943 		if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
1944 			waitrunningbufspace();
1945 	}
1946 
1947 	return (0);
1948 }
1949 
1950 void
1951 bufbdflush(struct bufobj *bo, struct buf *bp)
1952 {
1953 	struct buf *nbp;
1954 
1955 	if (bo->bo_dirty.bv_cnt > dirtybufthresh + 10) {
1956 		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
1957 		altbufferflushes++;
1958 	} else if (bo->bo_dirty.bv_cnt > dirtybufthresh) {
1959 		BO_LOCK(bo);
1960 		/*
1961 		 * Try to find a buffer to flush.
1962 		 */
1963 		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
1964 			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
1965 			    BUF_LOCK(nbp,
1966 				     LK_EXCLUSIVE | LK_NOWAIT, NULL))
1967 				continue;
1968 			if (bp == nbp)
1969 				panic("bdwrite: found ourselves");
1970 			BO_UNLOCK(bo);
1971 			/* Don't countdeps with the bo lock held. */
1972 			if (buf_countdeps(nbp, 0)) {
1973 				BO_LOCK(bo);
1974 				BUF_UNLOCK(nbp);
1975 				continue;
1976 			}
1977 			if (nbp->b_flags & B_CLUSTEROK) {
1978 				vfs_bio_awrite(nbp);
1979 			} else {
1980 				bremfree(nbp);
1981 				bawrite(nbp);
1982 			}
1983 			dirtybufferflushes++;
1984 			break;
1985 		}
1986 		if (nbp == NULL)
1987 			BO_UNLOCK(bo);
1988 	}
1989 }
1990 
1991 /*
1992  * Delayed write. (Buffer is marked dirty).  Do not bother writing
1993  * anything if the buffer is marked invalid.
1994  *
1995  * Note that since the buffer must be completely valid, we can safely
1996  * set B_CACHE.  In fact, we have to set B_CACHE here rather then in
1997  * biodone() in order to prevent getblk from writing the buffer
1998  * out synchronously.
1999  */
2000 void
2001 bdwrite(struct buf *bp)
2002 {
2003 	struct thread *td = curthread;
2004 	struct vnode *vp;
2005 	struct bufobj *bo;
2006 
2007 	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2008 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2009 	KASSERT((bp->b_flags & B_BARRIER) == 0,
2010 	    ("Barrier request in delayed write %p", bp));
2011 	BUF_ASSERT_HELD(bp);
2012 
2013 	if (bp->b_flags & B_INVAL) {
2014 		brelse(bp);
2015 		return;
2016 	}
2017 
2018 	/*
2019 	 * If we have too many dirty buffers, don't create any more.
2020 	 * If we are wildly over our limit, then force a complete
2021 	 * cleanup. Otherwise, just keep the situation from getting
2022 	 * out of control. Note that we have to avoid a recursive
2023 	 * disaster and not try to clean up after our own cleanup!
2024 	 */
2025 	vp = bp->b_vp;
2026 	bo = bp->b_bufobj;
2027 	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
2028 		td->td_pflags |= TDP_INBDFLUSH;
2029 		BO_BDFLUSH(bo, bp);
2030 		td->td_pflags &= ~TDP_INBDFLUSH;
2031 	} else
2032 		recursiveflushes++;
2033 
2034 	bdirty(bp);
2035 	/*
2036 	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
2037 	 * true even of NFS now.
2038 	 */
2039 	bp->b_flags |= B_CACHE;
2040 
2041 	/*
2042 	 * This bmap keeps the system from needing to do the bmap later,
2043 	 * perhaps when the system is attempting to do a sync.  Since it
2044 	 * is likely that the indirect block -- or whatever other data
2045 	 * structure the filesystem needs -- is still in memory now, it is
2046 	 * a good thing to do this.  Note also that if the pageout daemon is
2047 	 * requesting a sync -- there might not be enough memory to do
2048 	 * the bmap then...  So, this is important to do.
2049 	 */
2050 	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
2051 		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
2052 	}
2053 
2054 	/*
2055 	 * Set the *dirty* buffer range based upon the VM system dirty
2056 	 * pages.
2057 	 *
2058 	 * Mark the buffer pages as clean.  We need to do this here to
2059 	 * satisfy the vnode_pager and the pageout daemon, so that they
2060 	 * think that the pages have been "cleaned".  Note that since
2061 	 * the pages are in a delayed write buffer -- the VFS layer
2062 	 * "will" see that the pages get written out on the next sync,
2063 	 * or perhaps the cluster will be completed.
2064 	 */
2065 	vfs_clean_pages_dirty_buf(bp);
2066 	bqrelse(bp);
2067 
2068 	/*
2069 	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
2070 	 * due to the softdep code.
2071 	 */
2072 }
2073 
2074 /*
2075  *	bdirty:
2076  *
2077  *	Turn buffer into delayed write request.  We must clear BIO_READ and
2078  *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
2079  *	itself to properly update it in the dirty/clean lists.  We mark it
2080  *	B_DONE to ensure that any asynchronization of the buffer properly
2081  *	clears B_DONE ( else a panic will occur later ).
2082  *
2083  *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
2084  *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
2085  *	should only be called if the buffer is known-good.
2086  *
2087  *	Since the buffer is not on a queue, we do not update the numfreebuffers
2088  *	count.
2089  *
2090  *	The buffer must be on QUEUE_NONE.
2091  */
2092 void
2093 bdirty(struct buf *bp)
2094 {
2095 
2096 	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
2097 	    bp, bp->b_vp, bp->b_flags);
2098 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2099 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2100 	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
2101 	BUF_ASSERT_HELD(bp);
2102 	bp->b_flags &= ~(B_RELBUF);
2103 	bp->b_iocmd = BIO_WRITE;
2104 
2105 	if ((bp->b_flags & B_DELWRI) == 0) {
2106 		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
2107 		reassignbuf(bp);
2108 		bdirtyadd();
2109 	}
2110 }
2111 
2112 /*
2113  *	bundirty:
2114  *
2115  *	Clear B_DELWRI for buffer.
2116  *
2117  *	Since the buffer is not on a queue, we do not update the numfreebuffers
2118  *	count.
2119  *
2120  *	The buffer must be on QUEUE_NONE.
2121  */
2122 
2123 void
2124 bundirty(struct buf *bp)
2125 {
2126 
2127 	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2128 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2129 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2130 	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
2131 	BUF_ASSERT_HELD(bp);
2132 
2133 	if (bp->b_flags & B_DELWRI) {
2134 		bp->b_flags &= ~B_DELWRI;
2135 		reassignbuf(bp);
2136 		bdirtysub();
2137 	}
2138 	/*
2139 	 * Since it is now being written, we can clear its deferred write flag.
2140 	 */
2141 	bp->b_flags &= ~B_DEFERRED;
2142 }
2143 
2144 /*
2145  *	bawrite:
2146  *
2147  *	Asynchronous write.  Start output on a buffer, but do not wait for
2148  *	it to complete.  The buffer is released when the output completes.
2149  *
2150  *	bwrite() ( or the VOP routine anyway ) is responsible for handling
2151  *	B_INVAL buffers.  Not us.
2152  */
2153 void
2154 bawrite(struct buf *bp)
2155 {
2156 
2157 	bp->b_flags |= B_ASYNC;
2158 	(void) bwrite(bp);
2159 }
2160 
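/*
 * Illustrative sketch, not part of the original file: once a caller has
 * modified a buffer obtained from bread()/getblk(), it picks one of the
 * write flavors depending on how urgently the data must reach the disk.
 * Here "bp" stands for any held, valid buffer.
 *
 *	error = bwrite(bp);	synchronous; waits for completion
 *	bdwrite(bp);		delayed; mark dirty, let the syncer write it
 *	bawrite(bp);		asynchronous; start the write, do not wait
 */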
2161 /*
2162  *	babarrierwrite:
2163  *
2164  *	Asynchronous barrier write.  Start output on a buffer, but do not
2165  *	wait for it to complete.  Place a write barrier after this write so
2166  *	that this buffer and all buffers written before it are committed to
2167  *	the disk before any buffers written after this write are committed
2168  *	to the disk.  The buffer is released when the output completes.
2169  */
2170 void
2171 babarrierwrite(struct buf *bp)
2172 {
2173 
2174 	bp->b_flags |= B_ASYNC | B_BARRIER;
2175 	(void) bwrite(bp);
2176 }
2177 
2178 /*
2179  *	bbarrierwrite:
2180  *
2181  *	Synchronous barrier write.  Start output on a buffer and wait for
2182  *	it to complete.  Place a write barrier after this write so that
2183  *	this buffer and all buffers written before it are committed to
2184  *	the disk before any buffers written after this write are committed
2185  *	to the disk.  The buffer is released when the output completes.
2186  */
2187 int
2188 bbarrierwrite(struct buf *bp)
2189 {
2190 
2191 	bp->b_flags |= B_BARRIER;
2192 	return (bwrite(bp));
2193 }
2194 
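/*
 * Illustrative sketch, not part of the original file: a caller that needs
 * ordering between groups of writes (for example a journaling filesystem
 * issuing a commit record) can use the barrier variants.  The names
 * "jbp1", "jbp2" and "cbp" are hypothetical buffers.
 *
 *	bawrite(jbp1);			journal blocks, no ordering needed
 *	bawrite(jbp2);
 *	error = bbarrierwrite(cbp);	commit record; everything written
 *					before it reaches the disk first
 */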
2195 /*
2196  *	bwillwrite:
2197  *
2198  *	Called prior to the locking of any vnodes when we are expecting to
2199  *	write.  We do not want to starve the buffer cache with too many
2200  *	dirty buffers so we block here.  By blocking prior to the locking
2201  *	of any vnodes we attempt to avoid the situation where a locked vnode
2202  *	prevents the various system daemons from flushing related buffers.
2203  */
2204 void
2205 bwillwrite(void)
2206 {
2207 
2208 	if (numdirtybuffers >= hidirtybuffers) {
2209 		mtx_lock(&bdirtylock);
2210 		while (numdirtybuffers >= hidirtybuffers) {
2211 			bdirtywait = 1;
2212 			msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4),
2213 			    "flswai", 0);
2214 		}
2215 		mtx_unlock(&bdirtylock);
2216 	}
2217 }
2218 
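/*
 * Illustrative sketch, not part of the original file: a write path calls
 * bwillwrite() before acquiring any vnode locks so that throttling on the
 * dirty buffer count never happens while a vnode lock is held.
 *
 *	bwillwrite();				may sleep on dirty buffers
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = VOP_WRITE(vp, uio, ioflag, cred);
 *	VOP_UNLOCK(vp, 0);
 */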
2219 /*
2220  * Return true if we have too many dirty buffers.
2221  */
2222 int
2223 buf_dirty_count_severe(void)
2224 {
2225 
2226 	return(numdirtybuffers >= hidirtybuffers);
2227 }
2228 
2229 /*
2230  *	brelse:
2231  *
2232  *	Release a busy buffer and, if requested, free its resources.  The
2233  *	buffer will be stashed in the appropriate bufqueue[] allowing it
2234  *	to be accessed later as a cache entity or reused for other purposes.
2235  */
2236 void
2237 brelse(struct buf *bp)
2238 {
2239 	int qindex;
2240 
2241 	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
2242 	    bp, bp->b_vp, bp->b_flags);
2243 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2244 	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2245 	KASSERT((bp->b_flags & B_VMIO) != 0 || (bp->b_flags & B_NOREUSE) == 0,
2246 	    ("brelse: non-VMIO buffer marked NOREUSE"));
2247 
2248 	if (BUF_LOCKRECURSED(bp)) {
2249 		/*
2250 		 * Do not process; in particular, do not handle
2251 		 * B_INVAL/B_RELBUF and do not release to the free list.
2252 		 */
2253 		BUF_UNLOCK(bp);
2254 		return;
2255 	}
2256 
2257 	if (bp->b_flags & B_MANAGED) {
2258 		bqrelse(bp);
2259 		return;
2260 	}
2261 
2262 	if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) {
2263 		BO_LOCK(bp->b_bufobj);
2264 		bp->b_vflags &= ~BV_BKGRDERR;
2265 		BO_UNLOCK(bp->b_bufobj);
2266 		bdirty(bp);
2267 	}
2268 	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2269 	    bp->b_error == EIO && !(bp->b_flags & B_INVAL)) {
2270 		/*
2271 		 * Failed write, redirty.  Must clear BIO_ERROR to prevent
2272 		 * pages from being scrapped.  If the error is anything
2273 		 * other than an I/O error (EIO), assume that retrying
2274 		 * is futile.
2275 		 */
2276 		bp->b_ioflags &= ~BIO_ERROR;
2277 		bdirty(bp);
2278 	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
2279 	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
2280 		/*
2281 		 * Either a failed I/O or we were asked to free or not
2282 		 * cache the buffer.
2283 		 */
2284 		bp->b_flags |= B_INVAL;
2285 		if (!LIST_EMPTY(&bp->b_dep))
2286 			buf_deallocate(bp);
2287 		if (bp->b_flags & B_DELWRI)
2288 			bdirtysub();
2289 		bp->b_flags &= ~(B_DELWRI | B_CACHE);
2290 		if ((bp->b_flags & B_VMIO) == 0) {
2291 			allocbuf(bp, 0);
2292 			if (bp->b_vp)
2293 				brelvp(bp);
2294 		}
2295 	}
2296 
2297 	/*
2298 	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_truncate()
2299 	 * is called with B_DELWRI set, the underlying pages may wind up
2300 	 * getting freed causing a previous write (bdwrite()) to get 'lost'
2301 	 * because pages associated with a B_DELWRI bp are marked clean.
2302 	 *
2303 	 * We still allow the B_INVAL case to call vfs_vmio_truncate(), even
2304 	 * if B_DELWRI is set.
2305 	 */
2306 	if (bp->b_flags & B_DELWRI)
2307 		bp->b_flags &= ~B_RELBUF;
2308 
2309 	/*
2310 	 * VMIO buffer rundown.  It is not really necessary to keep a VMIO buffer
2311 	 * constituted, not even NFS buffers now.  Two flags affect this.  If
2312 	 * B_INVAL, the struct buf is invalidated but the VM object is kept
2313 	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
2314 	 *
2315 	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
2316 	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
2317 	 * buffer is also B_INVAL because it hits the re-dirtying code above.
2318 	 *
2319 	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
2320 	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
2321 	 * the commit state and we cannot afford to lose the buffer. If the
2322 	 * buffer has a background write in progress, we need to keep it
2323 	 * around to prevent it from being reconstituted and starting a second
2324 	 * background write.
2325 	 */
2326 	if ((bp->b_flags & B_VMIO) && (bp->b_flags & B_NOCACHE ||
2327 	    (bp->b_ioflags & BIO_ERROR && bp->b_iocmd == BIO_READ)) &&
2328 	    !(bp->b_vp->v_mount != NULL &&
2329 	    (bp->b_vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
2330 	    !vn_isdisk(bp->b_vp, NULL) && (bp->b_flags & B_DELWRI))) {
2331 		vfs_vmio_invalidate(bp);
2332 		allocbuf(bp, 0);
2333 	}
2334 
2335 	if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0 ||
2336 	    (bp->b_flags & (B_DELWRI | B_NOREUSE)) == B_NOREUSE) {
2337 		allocbuf(bp, 0);
2338 		bp->b_flags &= ~B_NOREUSE;
2339 		if (bp->b_vp != NULL)
2340 			brelvp(bp);
2341 	}
2342 
2343 	/*
2344 	 * If the buffer has junk contents, signal it and eventually
2345 	 * clean up B_DELWRI and disassociate the vnode so that gbincore()
2346 	 * doesn't find it.
2347 	 */
2348 	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
2349 	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
2350 		bp->b_flags |= B_INVAL;
2351 	if (bp->b_flags & B_INVAL) {
2352 		if (bp->b_flags & B_DELWRI)
2353 			bundirty(bp);
2354 		if (bp->b_vp)
2355 			brelvp(bp);
2356 	}
2357 
2358 	/* buffers with no memory */
2359 	if (bp->b_bufsize == 0) {
2360 		buf_free(bp);
2361 		return;
2362 	}
2363 	/* buffers with junk contents */
2364 	if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
2365 	    (bp->b_ioflags & BIO_ERROR)) {
2366 		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
2367 		if (bp->b_vflags & BV_BKGRDINPROG)
2368 			panic("losing buffer 2");
2369 		qindex = QUEUE_CLEAN;
2370 		bp->b_flags |= B_AGE;
2371 	/* remaining buffers */
2372 	} else if (bp->b_flags & B_DELWRI)
2373 		qindex = QUEUE_DIRTY;
2374 	else
2375 		qindex = QUEUE_CLEAN;
2376 
2377 	binsfree(bp, qindex);
2378 
2379 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF | B_DIRECT);
2380 	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
2381 		panic("brelse: not dirty");
2382 	/* unlock */
2383 	BUF_UNLOCK(bp);
2384 	if (qindex == QUEUE_CLEAN)
2385 		bufspace_wakeup();
2386 }
2387 
2388 /*
2389  * Release a buffer back to the appropriate queue but do not try to free
2390  * it.  The buffer is expected to be used again soon.
2391  *
2392  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
2393  * biodone() to requeue an async I/O on completion.  It is also used when
2394  * known good buffers need to be requeued but we think we may need the data
2395  * again soon.
2396  *
2397  * XXX we should be able to leave the B_RELBUF hint set on completion.
2398  */
2399 void
2400 bqrelse(struct buf *bp)
2401 {
2402 	int qindex;
2403 
2404 	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2405 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2406 	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2407 
2408 	qindex = QUEUE_NONE;
2409 	if (BUF_LOCKRECURSED(bp)) {
2410 		/* do not release to free list */
2411 		BUF_UNLOCK(bp);
2412 		return;
2413 	}
2414 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
2415 
2416 	if (bp->b_flags & B_MANAGED) {
2417 		if (bp->b_flags & B_REMFREE)
2418 			bremfreef(bp);
2419 		goto out;
2420 	}
2421 
2422 	/* buffers with stale but valid contents */
2423 	if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG |
2424 	    BV_BKGRDERR)) == BV_BKGRDERR) {
2425 		BO_LOCK(bp->b_bufobj);
2426 		bp->b_vflags &= ~BV_BKGRDERR;
2427 		BO_UNLOCK(bp->b_bufobj);
2428 		qindex = QUEUE_DIRTY;
2429 	} else {
2430 		if ((bp->b_flags & B_DELWRI) == 0 &&
2431 		    (bp->b_xflags & BX_VNDIRTY))
2432 			panic("bqrelse: not dirty");
2433 		if ((bp->b_flags & B_NOREUSE) != 0) {
2434 			brelse(bp);
2435 			return;
2436 		}
2437 		qindex = QUEUE_CLEAN;
2438 	}
2439 	binsfree(bp, qindex);
2440 
2441 out:
2442 	/* unlock */
2443 	BUF_UNLOCK(bp);
2444 	if (qindex == QUEUE_CLEAN)
2445 		bufspace_wakeup();
2446 }
2447 
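/*
 * Illustrative sketch, not part of the original file: releasing a buffer
 * after use comes down to choosing between the two routines above.
 *
 *	if (the contents are still wanted)
 *		bqrelse(bp);	cheap requeue, keeps pages and KVA intact
 *	else
 *		brelse(bp);	honors B_INVAL/B_RELBUF/B_NOCACHE and may
 *				tear the buffer down completely
 */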
2448 /*
2449  * Complete I/O to a VMIO backed page.  Validate the pages as appropriate,
2450  * restore bogus pages.
2451  */
2452 static void
2453 vfs_vmio_iodone(struct buf *bp)
2454 {
2455 	vm_ooffset_t foff;
2456 	vm_page_t m;
2457 	vm_object_t obj;
2458 	struct vnode *vp;
2459 	int bogus, i, iosize;
2460 
2461 	obj = bp->b_bufobj->bo_object;
2462 	KASSERT(obj->paging_in_progress >= bp->b_npages,
2463 	    ("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)",
2464 	    obj->paging_in_progress, bp->b_npages));
2465 
2466 	vp = bp->b_vp;
2467 	KASSERT(vp->v_holdcnt > 0,
2468 	    ("vfs_vmio_iodone: vnode %p has zero hold count", vp));
2469 	KASSERT(vp->v_object != NULL,
2470 	    ("vfs_vmio_iodone: vnode %p has no vm_object", vp));
2471 
2472 	foff = bp->b_offset;
2473 	KASSERT(bp->b_offset != NOOFFSET,
2474 	    ("vfs_vmio_iodone: bp %p has no buffer offset", bp));
2475 
2476 	bogus = 0;
2477 	iosize = bp->b_bcount - bp->b_resid;
2478 	VM_OBJECT_WLOCK(obj);
2479 	for (i = 0; i < bp->b_npages; i++) {
2480 		int resid;
2481 
2482 		resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
2483 		if (resid > iosize)
2484 			resid = iosize;
2485 
2486 		/*
2487 		 * cleanup bogus pages, restoring the originals
2488 		 */
2489 		m = bp->b_pages[i];
2490 		if (m == bogus_page) {
2491 			bogus = 1;
2492 			m = vm_page_lookup(obj, OFF_TO_IDX(foff));
2493 			if (m == NULL)
2494 				panic("biodone: page disappeared!");
2495 			bp->b_pages[i] = m;
2496 		} else if ((bp->b_iocmd == BIO_READ) && resid > 0) {
2497 			/*
2498 			 * In the write case, the valid and clean bits are
2499 			 * already changed correctly ( see bdwrite() ), so we
2500 			 * only need to do this here in the read case.
2501 			 */
2502 			KASSERT((m->dirty & vm_page_bits(foff & PAGE_MASK,
2503 			    resid)) == 0, ("vfs_vmio_iodone: page %p "
2504 			    "has unexpected dirty bits", m));
2505 			vfs_page_set_valid(bp, foff, m);
2506 		}
2507 		KASSERT(OFF_TO_IDX(foff) == m->pindex,
2508 		    ("vfs_vmio_iodone: foff(%jd)/pindex(%ju) mismatch",
2509 		    (intmax_t)foff, (uintmax_t)m->pindex));
2510 
2511 		vm_page_sunbusy(m);
2512 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
2513 		iosize -= resid;
2514 	}
2515 	vm_object_pip_wakeupn(obj, bp->b_npages);
2516 	VM_OBJECT_WUNLOCK(obj);
2517 	if (bogus && buf_mapped(bp)) {
2518 		BUF_CHECK_MAPPED(bp);
2519 		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
2520 		    bp->b_pages, bp->b_npages);
2521 	}
2522 }
2523 
2524 /*
2525  * Unwire a page held by a buf and place it on the appropriate vm queue.
2526  */
2527 static void
2528 vfs_vmio_unwire(struct buf *bp, vm_page_t m)
2529 {
2530 	bool freed;
2531 
2532 	vm_page_lock(m);
2533 	if (vm_page_unwire(m, PQ_NONE)) {
2534 		/*
2535 		 * Determine if the page should be freed before adding
2536 		 * it to the inactive queue.
2537 		 */
2538 		if (m->valid == 0) {
2539 			freed = !vm_page_busied(m);
2540 			if (freed)
2541 				vm_page_free(m);
2542 		} else if ((bp->b_flags & B_DIRECT) != 0)
2543 			freed = vm_page_try_to_free(m);
2544 		else
2545 			freed = false;
2546 		if (!freed) {
2547 			/*
2548 			 * If the page is unlikely to be reused, let the
2549 			 * VM know.  Otherwise, maintain LRU page
2550 			 * ordering and put the page at the tail of the
2551 			 * inactive queue.
2552 			 */
2553 			if ((bp->b_flags & B_NOREUSE) != 0)
2554 				vm_page_deactivate_noreuse(m);
2555 			else
2556 				vm_page_deactivate(m);
2557 		}
2558 	}
2559 	vm_page_unlock(m);
2560 }
2561 
2562 /*
2563  * Perform page invalidation when a buffer is released.  The fully invalid
2564  * pages will be reclaimed later in vfs_vmio_truncate().
2565  */
2566 static void
2567 vfs_vmio_invalidate(struct buf *bp)
2568 {
2569 	vm_object_t obj;
2570 	vm_page_t m;
2571 	int i, resid, poffset, presid;
2572 
2573 	if (buf_mapped(bp)) {
2574 		BUF_CHECK_MAPPED(bp);
2575 		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
2576 	} else
2577 		BUF_CHECK_UNMAPPED(bp);
2578 	/*
2579 	 * Get the base offset and length of the buffer.  Note that
2580 	 * in the VMIO case if the buffer block size is not
2581 	 * page-aligned then the b_data pointer may not be page-aligned.
2582 	 * But our b_pages[] array *IS* page aligned.
2583 	 *
2584 	 * block sizes less than DEV_BSIZE (usually 512) are not
2585 	 * supported due to the page granularity bits (m->valid,
2586 	 * m->dirty, etc...).
2587 	 *
2588 	 * See man buf(9) for more information
2589 	 */
2590 	obj = bp->b_bufobj->bo_object;
2591 	resid = bp->b_bufsize;
2592 	poffset = bp->b_offset & PAGE_MASK;
2593 	VM_OBJECT_WLOCK(obj);
2594 	for (i = 0; i < bp->b_npages; i++) {
2595 		m = bp->b_pages[i];
2596 		if (m == bogus_page)
2597 			panic("vfs_vmio_invalidate: Unexpected bogus page.");
2598 		bp->b_pages[i] = NULL;
2599 
2600 		presid = resid > (PAGE_SIZE - poffset) ?
2601 		    (PAGE_SIZE - poffset) : resid;
2602 		KASSERT(presid >= 0, ("brelse: extra page"));
2603 		while (vm_page_xbusied(m)) {
2604 			vm_page_lock(m);
2605 			VM_OBJECT_WUNLOCK(obj);
2606 			vm_page_busy_sleep(m, "mbncsh");
2607 			VM_OBJECT_WLOCK(obj);
2608 		}
2609 		if (pmap_page_wired_mappings(m) == 0)
2610 			vm_page_set_invalid(m, poffset, presid);
2611 		vfs_vmio_unwire(bp, m);
2612 		resid -= presid;
2613 		poffset = 0;
2614 	}
2615 	VM_OBJECT_WUNLOCK(obj);
2616 	bp->b_npages = 0;
2617 }
2618 
2619 /*
2620  * Page-granular truncation of an existing VMIO buffer.
2621  */
2622 static void
2623 vfs_vmio_truncate(struct buf *bp, int desiredpages)
2624 {
2625 	vm_object_t obj;
2626 	vm_page_t m;
2627 	int i;
2628 
2629 	if (bp->b_npages == desiredpages)
2630 		return;
2631 
2632 	if (buf_mapped(bp)) {
2633 		BUF_CHECK_MAPPED(bp);
2634 		pmap_qremove((vm_offset_t)trunc_page((vm_offset_t)bp->b_data) +
2635 		    (desiredpages << PAGE_SHIFT), bp->b_npages - desiredpages);
2636 	} else
2637 		BUF_CHECK_UNMAPPED(bp);
2638 	obj = bp->b_bufobj->bo_object;
2639 	if (obj != NULL)
2640 		VM_OBJECT_WLOCK(obj);
2641 	for (i = desiredpages; i < bp->b_npages; i++) {
2642 		m = bp->b_pages[i];
2643 		KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
2644 		bp->b_pages[i] = NULL;
2645 		vfs_vmio_unwire(bp, m);
2646 	}
2647 	if (obj != NULL)
2648 		VM_OBJECT_WUNLOCK(obj);
2649 	bp->b_npages = desiredpages;
2650 }
2651 
2652 /*
2653  * Byte granular extension of VMIO buffers.
2654  */
2655 static void
2656 vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
2657 {
2658 	/*
2659 	 * We are growing the buffer, possibly in a
2660 	 * byte-granular fashion.
2661 	 */
2662 	vm_object_t obj;
2663 	vm_offset_t toff;
2664 	vm_offset_t tinc;
2665 	vm_page_t m;
2666 
2667 	/*
2668 	 * Step 1, bring in the VM pages from the object, allocating
2669 	 * them if necessary.  We must clear B_CACHE if these pages
2670 	 * are not valid for the range covered by the buffer.
2671 	 */
2672 	obj = bp->b_bufobj->bo_object;
2673 	VM_OBJECT_WLOCK(obj);
2674 	while (bp->b_npages < desiredpages) {
2675 		/*
2676 		 * We must allocate system pages since blocking
2677 		 * here could interfere with paging I/O, no
2678 		 * matter which process we are running in.
2679 		 *
2680 		 * Only exclusive busy can be tested here.
2681 		 * Blocking on shared busy might lead to
2682 		 * deadlocks once allocbuf() is called after
2683 		 * the pages have been busied by vfs_busy_pages().
2684 		 */
2685 		m = vm_page_grab(obj, OFF_TO_IDX(bp->b_offset) + bp->b_npages,
2686 		    VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM |
2687 		    VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY |
2688 		    VM_ALLOC_COUNT(desiredpages - bp->b_npages));
2689 		if (m->valid == 0)
2690 			bp->b_flags &= ~B_CACHE;
2691 		bp->b_pages[bp->b_npages] = m;
2692 		++bp->b_npages;
2693 	}
2694 
2695 	/*
2696 	 * Step 2.  We've loaded the pages into the buffer,
2697 	 * we have to figure out if we can still have B_CACHE
2698 	 * set.  Note that B_CACHE is set according to the
2699 	 * byte-granular range ( bcount and size ), not the
2700 	 * aligned range ( newbsize ).
2701 	 *
2702 	 * The VM test is against m->valid, which is DEV_BSIZE
2703 	 * aligned.  Needless to say, the validity of the data
2704 	 * needs to also be DEV_BSIZE aligned.  Note that this
2705 	 * fails with NFS if the server or some other client
2706 	 * extends the file's EOF.  If our buffer is resized,
2707 	 * B_CACHE may remain set! XXX
2708 	 */
2709 	toff = bp->b_bcount;
2710 	tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
2711 	while ((bp->b_flags & B_CACHE) && toff < size) {
2712 		vm_pindex_t pi;
2713 
2714 		if (tinc > (size - toff))
2715 			tinc = size - toff;
2716 		pi = ((bp->b_offset & PAGE_MASK) + toff) >> PAGE_SHIFT;
2717 		m = bp->b_pages[pi];
2718 		vfs_buf_test_cache(bp, bp->b_offset, toff, tinc, m);
2719 		toff += tinc;
2720 		tinc = PAGE_SIZE;
2721 	}
2722 	VM_OBJECT_WUNLOCK(obj);
2723 
2724 	/*
2725 	 * Step 3, fixup the KVA pmap.
2726 	 */
2727 	if (buf_mapped(bp))
2728 		bpmap_qenter(bp);
2729 	else
2730 		BUF_CHECK_UNMAPPED(bp);
2731 }
2732 
2733 /*
2734  * Check to see if a block at a particular lbn is available for a clustered
2735  * write.
2736  */
2737 static int
2738 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
2739 {
2740 	struct buf *bpa;
2741 	int match;
2742 
2743 	match = 0;
2744 
2745 	/* If the buf isn't in core skip it */
2746 	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
2747 		return (0);
2748 
2749 	/* If the buf is busy we don't want to wait for it */
2750 	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
2751 		return (0);
2752 
2753 	/* Only cluster with valid clusterable delayed write buffers */
2754 	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
2755 	    (B_DELWRI | B_CLUSTEROK))
2756 		goto done;
2757 
2758 	if (bpa->b_bufsize != size)
2759 		goto done;
2760 
2761 	/*
2762 	 * Check to see if it is in the expected place on disk and that the
2763 	 * block has been mapped.
2764 	 */
2765 	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
2766 		match = 1;
2767 done:
2768 	BUF_UNLOCK(bpa);
2769 	return (match);
2770 }
2771 
2772 /*
2773  *	vfs_bio_awrite:
2774  *
2775  *	Implement clustered async writes for clearing out B_DELWRI buffers.
2776  *	This is much better than the old way of writing only one buffer at
2777  *	a time.  Note that we may not be presented with the buffers in the
2778  *	correct order, so we search for the cluster in both directions.
2779  */
2780 int
2781 vfs_bio_awrite(struct buf *bp)
2782 {
2783 	struct bufobj *bo;
2784 	int i;
2785 	int j;
2786 	daddr_t lblkno = bp->b_lblkno;
2787 	struct vnode *vp = bp->b_vp;
2788 	int ncl;
2789 	int nwritten;
2790 	int size;
2791 	int maxcl;
2792 	int gbflags;
2793 
2794 	bo = &vp->v_bufobj;
2795 	gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
2796 	/*
2797 	 * right now we support clustered writing only to regular files.  If
2798 	 * we find a clusterable block we could be in the middle of a cluster
2799 	 * rather than at the beginning.
2800 	 */
2801 	if ((vp->v_type == VREG) &&
2802 	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
2803 	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
2804 
2805 		size = vp->v_mount->mnt_stat.f_iosize;
2806 		maxcl = MAXPHYS / size;
2807 
2808 		BO_RLOCK(bo);
2809 		for (i = 1; i < maxcl; i++)
2810 			if (vfs_bio_clcheck(vp, size, lblkno + i,
2811 			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
2812 				break;
2813 
2814 		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
2815 			if (vfs_bio_clcheck(vp, size, lblkno - j,
2816 			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
2817 				break;
2818 		BO_RUNLOCK(bo);
2819 		--j;
2820 		ncl = i + j;
2821 		/*
2822 		 * this is a possible cluster write
2823 		 */
2824 		if (ncl != 1) {
2825 			BUF_UNLOCK(bp);
2826 			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl,
2827 			    gbflags);
2828 			return (nwritten);
2829 		}
2830 	}
2831 	bremfree(bp);
2832 	bp->b_flags |= B_ASYNC;
2833 	/*
2834 	 * default (old) behavior, writing out only one block
2835 	 *
2836 	 * XXX returns b_bufsize instead of b_bcount for nwritten?
2837 	 */
2838 	nwritten = bp->b_bufsize;
2839 	(void) bwrite(bp);
2840 
2841 	return (nwritten);
2842 }
2843 
2844 /*
2845  *	getnewbuf_kva:
2846  *
2847  *	Allocate KVA for an empty buf header according to gbflags.
2848  */
2849 static int
2850 getnewbuf_kva(struct buf *bp, int gbflags, int maxsize)
2851 {
2852 
2853 	if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_UNMAPPED) {
2854 		/*
2855 		 * In order to keep fragmentation sane we only allocate kva
2856 		 * in BKVASIZE chunks.  XXX with vmem we can do page size.
2857 		 */
2858 		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
2859 
2860 		if (maxsize != bp->b_kvasize &&
2861 		    bufkva_alloc(bp, maxsize, gbflags))
2862 			return (ENOSPC);
2863 	}
2864 	return (0);
2865 }
2866 
2867 /*
2868  *	getnewbuf:
2869  *
2870  *	Find and initialize a new buffer header, freeing up existing buffers
2871  *	in the bufqueues as necessary.  The new buffer is returned locked.
2872  *
2873  *	We block if:
2874  *		We have insufficient buffer headers
2875  *		We have insufficient buffer space
2876  *		buffer_arena is too fragmented ( space reservation fails )
2877  *		If we have to flush dirty buffers ( but we try to avoid this )
2878  *
2879  *	The caller is responsible for releasing the reserved bufspace after
2880  *	allocbuf() is called.
2881  */
2882 static struct buf *
2883 getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int maxsize, int gbflags)
2884 {
2885 	struct buf *bp;
2886 	bool metadata, reserved;
2887 
2888 	KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
2889 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
2890 	if (!unmapped_buf_allowed)
2891 		gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
2892 
2893 	if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
2894 	    vp->v_type == VCHR)
2895 		metadata = true;
2896 	else
2897 		metadata = false;
2898 	atomic_add_int(&getnewbufcalls, 1);
2899 	reserved = false;
2900 	do {
2901 		if (reserved == false &&
2902 		    bufspace_reserve(maxsize, metadata) != 0)
2903 			continue;
2904 		reserved = true;
2905 		if ((bp = buf_alloc()) == NULL)
2906 			continue;
2907 		if (getnewbuf_kva(bp, gbflags, maxsize) == 0)
2908 			return (bp);
2909 		break;
2910 	} while(buf_scan(false) == 0);
2911 
2912 	if (reserved)
2913 		bufspace_release(maxsize);
2914 	if (bp != NULL) {
2915 		bp->b_flags |= B_INVAL;
2916 		brelse(bp);
2917 	}
2918 	bufspace_wait(vp, gbflags, slpflag, slptimeo);
2919 
2920 	return (NULL);
2921 }
2922 
2923 /*
2924  *	buf_daemon:
2925  *
2926  *	buffer flushing daemon.  Buffers are normally flushed by the
2927  *	update daemon but if it cannot keep up this process starts to
2928  *	take the load in an attempt to prevent getnewbuf() from blocking.
2929  */
2930 static struct kproc_desc buf_kp = {
2931 	"bufdaemon",
2932 	buf_daemon,
2933 	&bufdaemonproc
2934 };
2935 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
2936 
2937 static int
2938 buf_flush(struct vnode *vp, int target)
2939 {
2940 	int flushed;
2941 
2942 	flushed = flushbufqueues(vp, target, 0);
2943 	if (flushed == 0) {
2944 		/*
2945 		 * Could not find any buffers without rollback
2946 		 * dependencies, so just write the first one
2947 		 * in the hopes of eventually making progress.
2948 		 */
2949 		if (vp != NULL && target > 2)
2950 			target /= 2;
2951 		flushbufqueues(vp, target, 1);
2952 	}
2953 	return (flushed);
2954 }
2955 
2956 static void
2957 buf_daemon()
2958 {
2959 	int lodirty;
2960 
2961 	/*
2962 	 * This process needs to be suspended prior to shutdown sync.
2963 	 */
2964 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
2965 	    SHUTDOWN_PRI_LAST);
2966 
2967 	/*
2968 	 * This process is allowed to take the buffer cache to the limit
2969 	 */
2970 	curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
2971 	mtx_lock(&bdlock);
2972 	for (;;) {
2973 		bd_request = 0;
2974 		mtx_unlock(&bdlock);
2975 
2976 		kproc_suspend_check(bufdaemonproc);
2977 		lodirty = lodirtybuffers;
2978 		if (bd_speedupreq) {
2979 			lodirty = numdirtybuffers / 2;
2980 			bd_speedupreq = 0;
2981 		}
2982 		/*
2983 		 * Do the flush.  Limit the amount of in-transit I/O we
2984 		 * allow to build up, otherwise we would completely saturate
2985 		 * the I/O system.
2986 		 */
2987 		while (numdirtybuffers > lodirty) {
2988 			if (buf_flush(NULL, numdirtybuffers - lodirty) == 0)
2989 				break;
2990 			kern_yield(PRI_USER);
2991 		}
2992 
2993 		/*
2994 		 * Only clear bd_request if we have reached our low water
2995 		 * mark.  The buf_daemon normally waits 1 second and
2996 		 * then incrementally flushes any dirty buffers that have
2997 		 * built up, within reason.
2998 		 *
2999 		 * If we were unable to hit our low water mark and couldn't
3000 		 * find any flushable buffers, we sleep for a short period
3001 		 * to avoid endless loops on unlockable buffers.
3002 		 */
3003 		mtx_lock(&bdlock);
3004 		if (numdirtybuffers <= lodirtybuffers) {
3005 			/*
3006 			 * We reached our low water mark, reset the
3007 			 * request and sleep until we are needed again.
3008 			 * The sleep is just so the suspend code works.
3009 			 */
3010 			bd_request = 0;
3011 			/*
3012 			 * Do an extra wakeup in case dirty threshold
3013 			 * changed via sysctl and the explicit transition
3014 			 * out of shortfall was missed.
3015 			 */
3016 			bdirtywakeup();
3017 			if (runningbufspace <= lorunningspace)
3018 				runningwakeup();
3019 			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
3020 		} else {
3021 			/*
3022 			 * We couldn't find any flushable dirty buffers but
3023 			 * still have too many dirty buffers, we
3024 			 * have to sleep and try again.  (rare)
3025 			 */
3026 			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
3027 		}
3028 	}
3029 }
3030 
3031 /*
3032  *	flushbufqueues:
3033  *
3034  *	Try to flush a buffer in the dirty queue.  We must be careful to
3035  *	free up B_INVAL buffers instead of writing them, which NFS is
3036  *	particularly sensitive to.
3037  */
3038 static int flushwithdeps = 0;
3039 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
3040     0, "Number of buffers flushed with dependencies that require rollbacks");
3041 
3042 static int
3043 flushbufqueues(struct vnode *lvp, int target, int flushdeps)
3044 {
3045 	struct buf *sentinel;
3046 	struct vnode *vp;
3047 	struct mount *mp;
3048 	struct buf *bp;
3049 	int hasdeps;
3050 	int flushed;
3051 	int queue;
3052 	int error;
3053 	bool unlock;
3054 
3055 	flushed = 0;
3056 	queue = QUEUE_DIRTY;
3057 	bp = NULL;
3058 	sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
3059 	sentinel->b_qindex = QUEUE_SENTINEL;
3060 	mtx_lock(&bqlocks[queue]);
3061 	TAILQ_INSERT_HEAD(&bufqueues[queue], sentinel, b_freelist);
3062 	mtx_unlock(&bqlocks[queue]);
3063 	while (flushed != target) {
3064 		maybe_yield();
3065 		mtx_lock(&bqlocks[queue]);
3066 		bp = TAILQ_NEXT(sentinel, b_freelist);
3067 		if (bp != NULL) {
3068 			TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
3069 			TAILQ_INSERT_AFTER(&bufqueues[queue], bp, sentinel,
3070 			    b_freelist);
3071 		} else {
3072 			mtx_unlock(&bqlocks[queue]);
3073 			break;
3074 		}
3075 		/*
3076 		 * Skip sentinels inserted by other invocations of
3077 		 * flushbufqueues(), taking care not to reorder them.
3078 		 *
3079 		 * Only flush the buffers that belong to the
3080 		 * vnode locked by the curthread.
3081 		 */
3082 		if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL &&
3083 		    bp->b_vp != lvp)) {
3084 			mtx_unlock(&bqlocks[queue]);
3085 			continue;
3086 		}
3087 		error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
3088 		mtx_unlock(&bqlocks[queue]);
3089 		if (error != 0)
3090 			continue;
3091 		if (bp->b_pin_count > 0) {
3092 			BUF_UNLOCK(bp);
3093 			continue;
3094 		}
3095 		/*
3096 		 * BKGRDINPROG can only be set with the buf and bufobj
3097 		 * locks both held.  We tolerate a race to clear it here.
3098 		 */
3099 		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
3100 		    (bp->b_flags & B_DELWRI) == 0) {
3101 			BUF_UNLOCK(bp);
3102 			continue;
3103 		}
3104 		if (bp->b_flags & B_INVAL) {
3105 			bremfreef(bp);
3106 			brelse(bp);
3107 			flushed++;
3108 			continue;
3109 		}
3110 
3111 		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
3112 			if (flushdeps == 0) {
3113 				BUF_UNLOCK(bp);
3114 				continue;
3115 			}
3116 			hasdeps = 1;
3117 		} else
3118 			hasdeps = 0;
3119 		/*
3120 		 * We must hold the lock on a vnode before writing
3121 		 * one of its buffers. Otherwise we may confuse, or
3122 		 * in the case of a snapshot vnode, deadlock the
3123 		 * system.
3124 		 *
3125 		 * The lock order here is the reverse of the normal order
3126 		 * of vnode lock followed by buf lock.  This is ok because
3127 		 * the NOWAIT will prevent deadlock.
3128 		 */
3129 		vp = bp->b_vp;
3130 		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
3131 			BUF_UNLOCK(bp);
3132 			continue;
3133 		}
3134 		if (lvp == NULL) {
3135 			unlock = true;
3136 			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
3137 		} else {
3138 			ASSERT_VOP_LOCKED(vp, "getbuf");
3139 			unlock = false;
3140 			error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
3141 			    vn_lock(vp, LK_TRYUPGRADE);
3142 		}
3143 		if (error == 0) {
3144 			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
3145 			    bp, bp->b_vp, bp->b_flags);
3146 			if (curproc == bufdaemonproc) {
3147 				vfs_bio_awrite(bp);
3148 			} else {
3149 				bremfree(bp);
3150 				bwrite(bp);
3151 				notbufdflushes++;
3152 			}
3153 			vn_finished_write(mp);
3154 			if (unlock)
3155 				VOP_UNLOCK(vp, 0);
3156 			flushwithdeps += hasdeps;
3157 			flushed++;
3158 
3159 			/*
3160 			 * Sleeping on runningbufspace while holding
3161 			 * vnode lock leads to deadlock.
3162 			 */
3163 			if (curproc == bufdaemonproc &&
3164 			    runningbufspace > hirunningspace)
3165 				waitrunningbufspace();
3166 			continue;
3167 		}
3168 		vn_finished_write(mp);
3169 		BUF_UNLOCK(bp);
3170 	}
3171 	mtx_lock(&bqlocks[queue]);
3172 	TAILQ_REMOVE(&bufqueues[queue], sentinel, b_freelist);
3173 	mtx_unlock(&bqlocks[queue]);
3174 	free(sentinel, M_TEMP);
3175 	return (flushed);
3176 }
3177 
3178 /*
3179  * Check to see if a block is currently memory resident.
3180  */
3181 struct buf *
3182 incore(struct bufobj *bo, daddr_t blkno)
3183 {
3184 	struct buf *bp;
3185 
3186 	BO_RLOCK(bo);
3187 	bp = gbincore(bo, blkno);
3188 	BO_RUNLOCK(bo);
3189 	return (bp);
3190 }
3191 
3192 /*
3193  * Returns true if no I/O is needed to access the
3194  * associated VM object.  This is like incore except
3195  * it also hunts around in the VM system for the data.
3196  */
3197 
3198 static int
3199 inmem(struct vnode * vp, daddr_t blkno)
3200 {
3201 	vm_object_t obj;
3202 	vm_offset_t toff, tinc, size;
3203 	vm_page_t m;
3204 	vm_ooffset_t off;
3205 
3206 	ASSERT_VOP_LOCKED(vp, "inmem");
3207 
3208 	if (incore(&vp->v_bufobj, blkno))
3209 		return 1;
3210 	if (vp->v_mount == NULL)
3211 		return 0;
3212 	obj = vp->v_object;
3213 	if (obj == NULL)
3214 		return (0);
3215 
3216 	size = PAGE_SIZE;
3217 	if (size > vp->v_mount->mnt_stat.f_iosize)
3218 		size = vp->v_mount->mnt_stat.f_iosize;
3219 	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
3220 
3221 	VM_OBJECT_RLOCK(obj);
3222 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
3223 		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
3224 		if (!m)
3225 			goto notinmem;
3226 		tinc = size;
3227 		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
3228 			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
3229 		if (vm_page_is_valid(m,
3230 		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
3231 			goto notinmem;
3232 	}
3233 	VM_OBJECT_RUNLOCK(obj);
3234 	return 1;
3235 
3236 notinmem:
3237 	VM_OBJECT_RUNLOCK(obj);
3238 	return (0);
3239 }
3240 
3241 /*
3242  * Set the dirty range for a buffer based on the status of the dirty
3243  * bits in the pages comprising the buffer.  The range is limited
3244  * to the size of the buffer.
3245  *
3246  * Tell the VM system that the pages associated with this buffer
3247  * are clean.  This is used for delayed writes where the data is
3248  * going to go to disk eventually without additional VM intervention.
3249  *
3250  * Note that while we only really need to clean through to b_bcount, we
3251  * just go ahead and clean through to b_bufsize.
3252  */
3253 static void
3254 vfs_clean_pages_dirty_buf(struct buf *bp)
3255 {
3256 	vm_ooffset_t foff, noff, eoff;
3257 	vm_page_t m;
3258 	int i;
3259 
3260 	if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
3261 		return;
3262 
3263 	foff = bp->b_offset;
3264 	KASSERT(bp->b_offset != NOOFFSET,
3265 	    ("vfs_clean_pages_dirty_buf: no buffer offset"));
3266 
3267 	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
3268 	vfs_drain_busy_pages(bp);
3269 	vfs_setdirty_locked_object(bp);
3270 	for (i = 0; i < bp->b_npages; i++) {
3271 		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3272 		eoff = noff;
3273 		if (eoff > bp->b_offset + bp->b_bufsize)
3274 			eoff = bp->b_offset + bp->b_bufsize;
3275 		m = bp->b_pages[i];
3276 		vfs_page_set_validclean(bp, foff, m);
3277 		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
3278 		foff = noff;
3279 	}
3280 	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
3281 }
3282 
3283 static void
3284 vfs_setdirty_locked_object(struct buf *bp)
3285 {
3286 	vm_object_t object;
3287 	int i;
3288 
3289 	object = bp->b_bufobj->bo_object;
3290 	VM_OBJECT_ASSERT_WLOCKED(object);
3291 
3292 	/*
3293 	 * Only scan for modified pages if the object might be dirty, i.e.
3294 	 * it has not been flushed yet (OBJ_MIGHTBEDIRTY is still set).
3295 	 */
3296 	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) {
3297 		vm_offset_t boffset;
3298 		vm_offset_t eoffset;
3299 
3300 		/*
3301 		 * test the pages to see if they have been modified directly
3302 		 * by users through the VM system.
3303 		 */
3304 		for (i = 0; i < bp->b_npages; i++)
3305 			vm_page_test_dirty(bp->b_pages[i]);
3306 
3307 		/*
3308 		 * Calculate the encompassing dirty range, boffset and eoffset,
3309 		 * (eoffset - boffset) bytes.
3310 		 */
3311 
3312 		for (i = 0; i < bp->b_npages; i++) {
3313 			if (bp->b_pages[i]->dirty)
3314 				break;
3315 		}
3316 		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3317 
3318 		for (i = bp->b_npages - 1; i >= 0; --i) {
3319 			if (bp->b_pages[i]->dirty) {
3320 				break;
3321 			}
3322 		}
3323 		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3324 
3325 		/*
3326 		 * Fit it to the buffer.
3327 		 */
3328 
3329 		if (eoffset > bp->b_bcount)
3330 			eoffset = bp->b_bcount;
3331 
3332 		/*
3333 		 * If we have a good dirty range, merge with the existing
3334 		 * dirty range.
3335 		 */
3336 
3337 		if (boffset < eoffset) {
3338 			if (bp->b_dirtyoff > boffset)
3339 				bp->b_dirtyoff = boffset;
3340 			if (bp->b_dirtyend < eoffset)
3341 				bp->b_dirtyend = eoffset;
3342 		}
3343 	}
3344 }
3345 
3346 /*
3347  * Allocate the KVA mapping for an existing buffer.
3348  * If an unmapped buffer is provided but a mapped buffer is requested, also
3349  * take care to properly set up the mappings between pages and KVA.
3350  */
3351 static void
3352 bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
3353 {
3354 	int bsize, maxsize, need_mapping, need_kva;
3355 	off_t offset;
3356 
3357 	need_mapping = bp->b_data == unmapped_buf &&
3358 	    (gbflags & GB_UNMAPPED) == 0;
3359 	need_kva = bp->b_kvabase == unmapped_buf &&
3360 	    bp->b_data == unmapped_buf &&
3361 	    (gbflags & GB_KVAALLOC) != 0;
3362 	if (!need_mapping && !need_kva)
3363 		return;
3364 
3365 	BUF_CHECK_UNMAPPED(bp);
3366 
3367 	if (need_mapping && bp->b_kvabase != unmapped_buf) {
3368 		/*
3369 		 * Buffer is not mapped, but the KVA was already
3370 		 * reserved at the time of the instantiation.  Use the
3371 		 * allocated space.
3372 		 */
3373 		goto has_addr;
3374 	}
3375 
3376 	/*
3377 	 * Calculate the amount of the address space we would reserve
3378 	 * if the buffer was mapped.
3379 	 */
3380 	bsize = vn_isdisk(bp->b_vp, NULL) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
3381 	KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3382 	offset = blkno * bsize;
3383 	maxsize = size + (offset & PAGE_MASK);
3384 	maxsize = imax(maxsize, bsize);
3385 
3386 	while (bufkva_alloc(bp, maxsize, gbflags) != 0) {
3387 		if ((gbflags & GB_NOWAIT_BD) != 0) {
3388 			/*
3389 			 * XXXKIB: defragmentation cannot
3390 			 * succeed, not sure what else to do.
3391 			 */
3392 			panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp);
3393 		}
3394 		atomic_add_int(&mappingrestarts, 1);
3395 		bufspace_wait(bp->b_vp, gbflags, 0, 0);
3396 	}
3397 has_addr:
3398 	if (need_mapping) {
3399 		/* b_offset is handled by bpmap_qenter. */
3400 		bp->b_data = bp->b_kvabase;
3401 		BUF_CHECK_MAPPED(bp);
3402 		bpmap_qenter(bp);
3403 	}
3404 }
3405 
3406 /*
3407  *	getblk:
3408  *
3409  *	Get a block given a specified block and offset into a file/device.
3410  *	The buffers B_DONE bit will be cleared on return, making it almost
3411  *	The buffer's B_DONE bit will be cleared on return, making it almost
3412  *	return.  The caller should clear B_INVAL prior to initiating a
3413  *	READ.
3414  *
3415  *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
3416  *	an existing buffer.
3417  *
3418  *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
3419  *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
3420  *	and then cleared based on the backing VM.  If the previous buffer is
3421  *	non-0-sized but invalid, B_CACHE will be cleared.
3422  *
3423  *	If getblk() must create a new buffer, the new buffer is returned with
3424  *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
3425  *	case it is returned with B_INVAL clear and B_CACHE set based on the
3426  *	backing VM.
3427  *
3428  *	getblk() also forces a bwrite() for any B_DELWRI buffer whose
3429  *	B_CACHE bit is clear.
3430  *
3431  *	What this means, basically, is that the caller should use B_CACHE to
3432  *	determine whether the buffer is fully valid or not and should clear
3433  *	B_INVAL prior to issuing a read.  If the caller intends to validate
3434  *	the buffer by loading its data area with something, the caller needs
3435  *	to clear B_INVAL.  If the caller does this without issuing an I/O,
3436  *	the caller should set B_CACHE ( as an optimization ), else the caller
3437  *	should issue the I/O and biodone() will set B_CACHE if the I/O was
3438  *	a write attempt or if it was a successful read.  If the caller
3439  *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
3440  *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
3441  */
3442 struct buf *
3443 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
3444     int flags)
3445 {
3446 	struct buf *bp;
3447 	struct bufobj *bo;
3448 	int bsize, error, maxsize, vmio;
3449 	off_t offset;
3450 
3451 	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
3452 	KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3453 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3454 	ASSERT_VOP_LOCKED(vp, "getblk");
3455 	if (size > MAXBCACHEBUF)
3456 		panic("getblk: size(%d) > MAXBCACHEBUF(%d)\n", size,
3457 		    MAXBCACHEBUF);
3458 	if (!unmapped_buf_allowed)
3459 		flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3460 
3461 	bo = &vp->v_bufobj;
3462 loop:
3463 	BO_RLOCK(bo);
3464 	bp = gbincore(bo, blkno);
3465 	if (bp != NULL) {
3466 		int lockflags;
3467 		/*
3468 		 * Buffer is in-core.  If the buffer is neither busy nor managed,
3469 		 * it must be on a queue.
3470 		 */
3471 		lockflags = LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK;
3472 
3473 		if (flags & GB_LOCK_NOWAIT)
3474 			lockflags |= LK_NOWAIT;
3475 
3476 		error = BUF_TIMELOCK(bp, lockflags,
3477 		    BO_LOCKPTR(bo), "getblk", slpflag, slptimeo);
3478 
3479 		/*
3480 		 * If we slept and got the lock we have to restart in case
3481 		 * the buffer changed identities.
3482 		 */
3483 		if (error == ENOLCK)
3484 			goto loop;
3485 		/* We timed out or were interrupted. */
3486 		else if (error)
3487 			return (NULL);
3488 		/* If recursed, assume caller knows the rules. */
3489 		else if (BUF_LOCKRECURSED(bp))
3490 			goto end;
3491 
3492 		/*
3493 		 * The buffer is locked.  B_CACHE is cleared if the buffer is
3494 		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
3495 		 * and for a VMIO buffer B_CACHE is adjusted according to the
3496 		 * backing VM cache.
3497 		 */
3498 		if (bp->b_flags & B_INVAL)
3499 			bp->b_flags &= ~B_CACHE;
3500 		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
3501 			bp->b_flags |= B_CACHE;
3502 		if (bp->b_flags & B_MANAGED)
3503 			MPASS(bp->b_qindex == QUEUE_NONE);
3504 		else
3505 			bremfree(bp);
3506 
3507 		/*
3508 		 * check for size inconsistencies for non-VMIO case.
3509 		 */
3510 		if (bp->b_bcount != size) {
3511 			if ((bp->b_flags & B_VMIO) == 0 ||
3512 			    (size > bp->b_kvasize)) {
3513 				if (bp->b_flags & B_DELWRI) {
3514 					/*
3515 					 * If the buffer is pinned and the caller
3516 					 * does not want to sleep waiting for it
3517 					 * to be unpinned, bail out.
3518 					 */
3519 					if (bp->b_pin_count > 0) {
3520 						if (flags & GB_LOCK_NOWAIT) {
3521 							bqrelse(bp);
3522 							return (NULL);
3523 						} else {
3524 							bunpin_wait(bp);
3525 						}
3526 					}
3527 					bp->b_flags |= B_NOCACHE;
3528 					bwrite(bp);
3529 				} else {
3530 					if (LIST_EMPTY(&bp->b_dep)) {
3531 						bp->b_flags |= B_RELBUF;
3532 						brelse(bp);
3533 					} else {
3534 						bp->b_flags |= B_NOCACHE;
3535 						bwrite(bp);
3536 					}
3537 				}
3538 				goto loop;
3539 			}
3540 		}
3541 
3542 		/*
3543 		 * Handle the case of an unmapped buffer which should
3544 		 * become mapped, or of a buffer for which KVA
3545 		 * reservation is requested.
3546 		 */
3547 		bp_unmapped_get_kva(bp, blkno, size, flags);
3548 
3549 		/*
3550 		 * If the size is inconsistent in the VMIO case, we can resize
3551 		 * the buffer.  This might lead to B_CACHE getting set or
3552 		 * cleared.  If the size has not changed, B_CACHE remains
3553 		 * unchanged from its previous state.
3554 		 */
3555 		allocbuf(bp, size);
3556 
3557 		KASSERT(bp->b_offset != NOOFFSET,
3558 		    ("getblk: no buffer offset"));
3559 
3560 		/*
3561 		 * A buffer with B_DELWRI set and B_CACHE clear must
3562 		 * be committed before we can return the buffer in
3563 		 * order to prevent the caller from issuing a read
3564 		 * ( due to B_CACHE not being set ) and overwriting
3565 		 * it.
3566 		 *
3567 		 * Most callers, including NFS and FFS, need this to
3568 		 * operate properly either because they assume they
3569 		 * can issue a read if B_CACHE is not set, or because
3570 		 * ( for example ) an uncached B_DELWRI might loop due
3571 		 * to softupdates re-dirtying the buffer.  In the latter
3572 		 * case, B_CACHE is set after the first write completes,
3573 		 * preventing further loops.
3574 		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
3575 		 * above while extending the buffer, we cannot allow the
3576 		 * buffer to remain with B_CACHE set after the write
3577 		 * completes or it will represent a corrupt state.  To
3578 		 * deal with this we set B_NOCACHE to scrap the buffer
3579 		 * after the write.
3580 		 *
3581 		 * We might be able to do something fancy, like setting
3582 		 * B_CACHE in bwrite() except if B_DELWRI is already set,
3583 		 * so the below call doesn't set B_CACHE, but that gets real
3584 		 * confusing.  This is much easier.
3585 		 */
3586 
3587 		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
3588 			bp->b_flags |= B_NOCACHE;
3589 			bwrite(bp);
3590 			goto loop;
3591 		}
3592 		bp->b_flags &= ~B_DONE;
3593 	} else {
3594 		/*
3595 		 * Buffer is not in-core, create new buffer.  The buffer
3596 		 * returned by getnewbuf() is locked.  Note that the returned
3597 		 * buffer is also considered valid (not marked B_INVAL).
3598 		 */
3599 		BO_RUNLOCK(bo);
3600 		/*
3601 		 * If the user does not want us to create the buffer, bail out
3602 		 * here.
3603 		 */
3604 		if (flags & GB_NOCREAT)
3605 			return NULL;
3606 		if (numfreebuffers == 0 && TD_IS_IDLETHREAD(curthread))
3607 			return NULL;
3608 
3609 		bsize = vn_isdisk(vp, NULL) ? DEV_BSIZE : bo->bo_bsize;
3610 		KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3611 		offset = blkno * bsize;
3612 		vmio = vp->v_object != NULL;
3613 		if (vmio) {
3614 			maxsize = size + (offset & PAGE_MASK);
3615 		} else {
3616 			maxsize = size;
3617 			/* Do not allow non-VMIO unmapped buffers. */
3618 			flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3619 		}
3620 		maxsize = imax(maxsize, bsize);
3621 
3622 		bp = getnewbuf(vp, slpflag, slptimeo, maxsize, flags);
3623 		if (bp == NULL) {
3624 			if (slpflag || slptimeo)
3625 				return NULL;
3626 			goto loop;
3627 		}
3628 
3629 		/*
3630 		 * This code is used to make sure that a buffer is not
3631 		 * created while the getnewbuf routine is blocked.
3632 		 * This can be a problem whether the vnode is locked or not.
3633 		 * If the buffer is created out from under us, we have to
3634 		 * throw away the one we just created.
3635 		 *
3636 		 * Note: this must occur before we associate the buffer
3637 		 * with the vp especially considering limitations in
3638 		 * the splay tree implementation when dealing with duplicate
3639 		 * lblkno's.
3640 		 */
3641 		BO_LOCK(bo);
3642 		if (gbincore(bo, blkno)) {
3643 			BO_UNLOCK(bo);
3644 			bp->b_flags |= B_INVAL;
3645 			brelse(bp);
3646 			bufspace_release(maxsize);
3647 			goto loop;
3648 		}
3649 
3650 		/*
3651 		 * Insert the buffer into the hash, so that it can
3652 		 * be found by incore.
3653 		 */
3654 		bp->b_blkno = bp->b_lblkno = blkno;
3655 		bp->b_offset = offset;
3656 		bgetvp(vp, bp);
3657 		BO_UNLOCK(bo);
3658 
3659 		/*
3660 		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
3661 		 * buffer size starts out as 0, B_CACHE will be set by
3662 		 * allocbuf() for the VMIO case prior to it testing the
3663 		 * backing store for validity.
3664 		 */
3665 
3666 		if (vmio) {
3667 			bp->b_flags |= B_VMIO;
3668 			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
3669 			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
3670 			    bp, vp->v_object, bp->b_bufobj->bo_object));
3671 		} else {
3672 			bp->b_flags &= ~B_VMIO;
3673 			KASSERT(bp->b_bufobj->bo_object == NULL,
3674 			    ("ARGH! has b_bufobj->bo_object %p %p\n",
3675 			    bp, bp->b_bufobj->bo_object));
3676 			BUF_CHECK_MAPPED(bp);
3677 		}
3678 
3679 		allocbuf(bp, size);
3680 		bufspace_release(maxsize);
3681 		bp->b_flags &= ~B_DONE;
3682 	}
3683 	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
3684 	BUF_ASSERT_HELD(bp);
3685 end:
3686 	KASSERT(bp->b_bufobj == bo,
3687 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3688 	return (bp);
3689 }
3690 
3691 /*
3692  * Get an empty, disassociated buffer of given size.  The buffer is initially
3693  * set to B_INVAL.
3694  */
3695 struct buf *
3696 geteblk(int size, int flags)
3697 {
3698 	struct buf *bp;
3699 	int maxsize;
3700 
3701 	maxsize = (size + BKVAMASK) & ~BKVAMASK;
3702 	while ((bp = getnewbuf(NULL, 0, 0, maxsize, flags)) == NULL) {
3703 		if ((flags & GB_NOWAIT_BD) &&
3704 		    (curthread->td_pflags & TDP_BUFNEED) != 0)
3705 			return (NULL);
3706 	}
3707 	allocbuf(bp, size);
3708 	bufspace_release(maxsize);
3709 	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
3710 	BUF_ASSERT_HELD(bp);
3711 	return (bp);
3712 }
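/*
 * Illustrative sketch (not part of this file): a typical caller uses
 * geteblk() for a scratch buffer that is not associated with any vnode,
 * works on b_data, and then releases the buffer with brelse().  Because
 * the buffer comes back with B_INVAL set, brelse() discards it instead
 * of caching it.  The function below and consume_scratch() are
 * hypothetical.
 *
 *	static void
 *	scratch_copy(const void *src, int len)
 *	{
 *		struct buf *bp;
 *
 *		bp = geteblk(len, 0);
 *		bcopy(src, bp->b_data, len);
 *		consume_scratch(bp->b_data, len);
 *		brelse(bp);
 *	}
 */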
3713 
3714 /*
3715  * Truncate the backing store for a non-vmio buffer.
3716  */
3717 static void
3718 vfs_nonvmio_truncate(struct buf *bp, int newbsize)
3719 {
3720 
3721 	if (bp->b_flags & B_MALLOC) {
3722 		/*
3723 		 * malloced buffers are not shrunk
3724 		 */
3725 		if (newbsize == 0) {
3726 			bufmallocadjust(bp, 0);
3727 			free(bp->b_data, M_BIOBUF);
3728 			bp->b_data = bp->b_kvabase;
3729 			bp->b_flags &= ~B_MALLOC;
3730 		}
3731 		return;
3732 	}
3733 	vm_hold_free_pages(bp, newbsize);
3734 	bufspace_adjust(bp, newbsize);
3735 }
3736 
3737 /*
3738  * Extend the backing for a non-VMIO buffer.
3739  */
3740 static void
3741 vfs_nonvmio_extend(struct buf *bp, int newbsize)
3742 {
3743 	caddr_t origbuf;
3744 	int origbufsize;
3745 
3746 	/*
3747 	 * We only use malloced memory on the first allocation,
3748 	 * and revert to page-allocated memory when the buffer
3749 	 * grows.
3750 	 *
3751 	 * There is a potential smp race here that could lead
3752 	 * to bufmallocspace slightly passing the max.  It
3753 	 * is probably extremely rare and not worth worrying
3754 	 * over.
3755 	 */
3756 	if (bp->b_bufsize == 0 && newbsize <= PAGE_SIZE/2 &&
3757 	    bufmallocspace < maxbufmallocspace) {
3758 		bp->b_data = malloc(newbsize, M_BIOBUF, M_WAITOK);
3759 		bp->b_flags |= B_MALLOC;
3760 		bufmallocadjust(bp, newbsize);
3761 		return;
3762 	}
3763 
3764 	/*
3765 	 * If the buffer is growing on its other-than-first
3766 	 * allocation then we revert to the page-allocation
3767 	 * scheme.
3768 	 */
3769 	origbuf = NULL;
3770 	origbufsize = 0;
3771 	if (bp->b_flags & B_MALLOC) {
3772 		origbuf = bp->b_data;
3773 		origbufsize = bp->b_bufsize;
3774 		bp->b_data = bp->b_kvabase;
3775 		bufmallocadjust(bp, 0);
3776 		bp->b_flags &= ~B_MALLOC;
3777 		newbsize = round_page(newbsize);
3778 	}
3779 	vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + bp->b_bufsize,
3780 	    (vm_offset_t) bp->b_data + newbsize);
3781 	if (origbuf != NULL) {
3782 		bcopy(origbuf, bp->b_data, origbufsize);
3783 		free(origbuf, M_BIOBUF);
3784 	}
3785 	bufspace_adjust(bp, newbsize);
3786 }
3787 
3788 /*
3789  * This code constitutes the buffer memory from either anonymous system
3790  * memory (in the case of non-VMIO operations) or from an associated
3791  * VM object (in the case of VMIO operations).  This code is able to
3792  * resize a buffer up or down.
3793  *
3794  * Note that this code is tricky, and has many complications to resolve
3795  * deadlock or inconsistent data situations.  Tread lightly!!!
3796  * There are B_CACHE and B_DELWRI interactions that must be dealt with by
3797  * the caller.  Calling this code willy nilly can result in the loss of data.
3798  *
3799  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
3800  * B_CACHE for the non-VMIO case.
3801  */
3802 int
3803 allocbuf(struct buf *bp, int size)
3804 {
3805 	int newbsize;
3806 
3807 	BUF_ASSERT_HELD(bp);
3808 
3809 	if (bp->b_bcount == size)
3810 		return (1);
3811 
3812 	if (bp->b_kvasize != 0 && bp->b_kvasize < size)
3813 		panic("allocbuf: buffer too small");
3814 
3815 	newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
3816 	if ((bp->b_flags & B_VMIO) == 0) {
3817 		if ((bp->b_flags & B_MALLOC) == 0)
3818 			newbsize = round_page(newbsize);
3819 		/*
3820 		 * Just get anonymous memory from the kernel.  Don't
3821 		 * mess with B_CACHE.
3822 		 */
3823 		if (newbsize < bp->b_bufsize)
3824 			vfs_nonvmio_truncate(bp, newbsize);
3825 		else if (newbsize > bp->b_bufsize)
3826 			vfs_nonvmio_extend(bp, newbsize);
3827 	} else {
3828 		int desiredpages;
3829 
3830 		desiredpages = (size == 0) ? 0 :
3831 		    num_pages((bp->b_offset & PAGE_MASK) + newbsize);
3832 
3833 		if (bp->b_flags & B_MALLOC)
3834 			panic("allocbuf: VMIO buffer can't be malloced");
3835 		/*
3836 		 * Set B_CACHE initially if buffer is 0 length or will become
3837 		 * 0-length.
3838 		 */
3839 		if (size == 0 || bp->b_bufsize == 0)
3840 			bp->b_flags |= B_CACHE;
3841 
3842 		if (newbsize < bp->b_bufsize)
3843 			vfs_vmio_truncate(bp, desiredpages);
3844 		/* XXX This looks as if it should be newbsize > b_bufsize */
3845 		else if (size > bp->b_bcount)
3846 			vfs_vmio_extend(bp, desiredpages, size);
3847 		bufspace_adjust(bp, newbsize);
3848 	}
3849 	bp->b_bcount = size;		/* requested buffer size. */
3850 	return (1);
3851 }
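/*
 * Illustrative sketch (not part of this file): a filesystem extending the
 * last, partial block of a file typically resizes the in-core buffer with
 * allocbuf() after locking it via getblk(); allocbuf() then adjusts the
 * backing pages and B_CACHE as described above.  "vp", "lbn", "oldsize"
 * and "newsize" are assumed to be supplied by the caller.
 *
 *	struct buf *bp;
 *
 *	bp = getblk(vp, lbn, oldsize, 0, 0, 0);
 *	allocbuf(bp, newsize);
 *	bdwrite(bp);
 */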
3852 
3853 extern int inflight_transient_maps;
3854 
3855 void
3856 biodone(struct bio *bp)
3857 {
3858 	struct mtx *mtxp;
3859 	void (*done)(struct bio *);
3860 	vm_offset_t start, end;
3861 
3862 	if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
3863 		bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
3864 		bp->bio_flags |= BIO_UNMAPPED;
3865 		start = trunc_page((vm_offset_t)bp->bio_data);
3866 		end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
3867 		bp->bio_data = unmapped_buf;
3868 		pmap_qremove(start, OFF_TO_IDX(end - start));
3869 		vmem_free(transient_arena, start, end - start);
3870 		atomic_add_int(&inflight_transient_maps, -1);
3871 	}
3872 	done = bp->bio_done;
3873 	if (done == NULL) {
3874 		mtxp = mtx_pool_find(mtxpool_sleep, bp);
3875 		mtx_lock(mtxp);
3876 		bp->bio_flags |= BIO_DONE;
3877 		wakeup(bp);
3878 		mtx_unlock(mtxp);
3879 	} else {
3880 		bp->bio_flags |= BIO_DONE;
3881 		done(bp);
3882 	}
3883 }
3884 
3885 /*
3886  * Wait for a BIO to finish.
3887  */
3888 int
3889 biowait(struct bio *bp, const char *wchan)
3890 {
3891 	struct mtx *mtxp;
3892 
3893 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
3894 	mtx_lock(mtxp);
3895 	while ((bp->bio_flags & BIO_DONE) == 0)
3896 		msleep(bp, mtxp, PRIBIO, wchan, 0);
3897 	mtx_unlock(mtxp);
3898 	if (bp->bio_error != 0)
3899 		return (bp->bio_error);
3900 	if (!(bp->bio_flags & BIO_ERROR))
3901 		return (0);
3902 	return (EIO);
3903 }
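/*
 * Illustrative sketch (not part of this file): when no bio_done callback
 * is installed, biodone() posts BIO_DONE and wakes the sleeper, so a
 * caller can do synchronous I/O in the style of the g_read_data() and
 * g_write_data() helpers.  The GEOM consumer "cp" and the offset, length
 * and data pointer are assumed to exist in the caller.
 *
 *	struct bio *bp;
 *	int error;
 *
 *	bp = g_alloc_bio();
 *	bp->bio_cmd = BIO_READ;
 *	bp->bio_done = NULL;
 *	bp->bio_offset = offset;
 *	bp->bio_length = length;
 *	bp->bio_data = ptr;
 *	g_io_request(bp, cp);
 *	error = biowait(bp, "example");
 *	g_destroy_bio(bp);
 */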
3904 
3905 void
3906 biofinish(struct bio *bp, struct devstat *stat, int error)
3907 {
3908 
3909 	if (error) {
3910 		bp->bio_error = error;
3911 		bp->bio_flags |= BIO_ERROR;
3912 	}
3913 	if (stat != NULL)
3914 		devstat_end_transaction_bio(stat, bp);
3915 	biodone(bp);
3916 }
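/*
 * Illustrative sketch (not part of this file): a driver's strategy
 * routine can fail a request in one step with biofinish(), which records
 * the error, ends the devstat transaction if one is passed, and completes
 * the bio.  The softc lookup below is hypothetical.
 *
 *	static void
 *	example_strategy(struct bio *bp)
 *	{
 *		struct example_softc *sc;
 *
 *		sc = bp->bio_to->geom->softc;
 *		if (sc == NULL) {
 *			biofinish(bp, NULL, ENXIO);
 *			return;
 *		}
 *		(normal I/O path continues here)
 *	}
 */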
3917 
3918 /*
3919  *	bufwait:
3920  *
3921  *	Wait for buffer I/O completion, returning error status.  The buffer
3922  *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
3923  *	error and cleared.
3924  */
3925 int
3926 bufwait(struct buf *bp)
3927 {
3928 	if (bp->b_iocmd == BIO_READ)
3929 		bwait(bp, PRIBIO, "biord");
3930 	else
3931 		bwait(bp, PRIBIO, "biowr");
3932 	if (bp->b_flags & B_EINTR) {
3933 		bp->b_flags &= ~B_EINTR;
3934 		return (EINTR);
3935 	}
3936 	if (bp->b_ioflags & BIO_ERROR) {
3937 		return (bp->b_error ? bp->b_error : EIO);
3938 	} else {
3939 		return (0);
3940 	}
3941 }
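/*
 * Illustrative sketch (not part of this file): bufwait() pairs with
 * bstrategy() for synchronous buffer I/O, roughly the pattern used by
 * bread().  "vp", "blkno" and "size" are assumed to be supplied by the
 * caller.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	bp = getblk(vp, blkno, size, 0, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_iocmd = BIO_READ;
 *		bp->b_flags &= ~B_INVAL;
 *		bp->b_ioflags &= ~BIO_ERROR;
 *		vfs_busy_pages(bp, 0);
 *		bp->b_iooffset = dbtob(bp->b_blkno);
 *		bstrategy(bp);
 *		error = bufwait(bp);
 *		if (error != 0)
 *			brelse(bp);
 *	}
 */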
3942 
3943 /*
3944  *	bufdone:
3945  *
3946  *	Finish I/O on a buffer, optionally calling a completion function.
3947  *	This is usually called from an interrupt so process blocking is
3948  *	not allowed.
3949  *
3950  *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
3951  *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
3952  *	assuming B_INVAL is clear.
3953  *
3954  *	For the VMIO case, we set B_CACHE if the op was a read and no
3955  *	read error occurred, or if the op was a write.  B_CACHE is never
3956  *	set if the buffer is invalid or otherwise uncacheable.
3957  *
3958  *	biodone does not mess with B_INVAL, allowing the I/O routine or the
3959  *	initiator to leave B_INVAL set to brelse the buffer out of existence
3960  *	in the biodone routine.
3961  */
3962 void
3963 bufdone(struct buf *bp)
3964 {
3965 	struct bufobj *dropobj;
3966 	void    (*biodone)(struct buf *);
3967 
3968 	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
3969 	dropobj = NULL;
3970 
3971 	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
3972 	BUF_ASSERT_HELD(bp);
3973 
3974 	runningbufwakeup(bp);
3975 	if (bp->b_iocmd == BIO_WRITE)
3976 		dropobj = bp->b_bufobj;
3977 	/* call optional completion function if requested */
3978 	if (bp->b_iodone != NULL) {
3979 		biodone = bp->b_iodone;
3980 		bp->b_iodone = NULL;
3981 		(*biodone) (bp);
3982 		if (dropobj)
3983 			bufobj_wdrop(dropobj);
3984 		return;
3985 	}
3986 
3987 	bufdone_finish(bp);
3988 
3989 	if (dropobj)
3990 		bufobj_wdrop(dropobj);
3991 }
3992 
3993 void
3994 bufdone_finish(struct buf *bp)
3995 {
3996 	BUF_ASSERT_HELD(bp);
3997 
3998 	if (!LIST_EMPTY(&bp->b_dep))
3999 		buf_complete(bp);
4000 
4001 	if (bp->b_flags & B_VMIO) {
4002 		/*
4003 		 * Set B_CACHE if the op was a normal read and no error
4004 		 * occurred.  B_CACHE is set for writes in the b*write()
4005 		 * routines.
4006 		 */
4007 		if (bp->b_iocmd == BIO_READ &&
4008 		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
4009 		    !(bp->b_ioflags & BIO_ERROR))
4010 			bp->b_flags |= B_CACHE;
4011 		vfs_vmio_iodone(bp);
4012 	}
4013 
4014 	/*
4015 	 * For asynchronous completions, release the buffer now. The brelse
4016 	 * will do a wakeup there if necessary - so no need to do a wakeup
4017 	 * here in the async case. The sync case always needs to do a wakeup.
4018 	 */
4019 	if (bp->b_flags & B_ASYNC) {
4020 		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) ||
4021 		    (bp->b_ioflags & BIO_ERROR))
4022 			brelse(bp);
4023 		else
4024 			bqrelse(bp);
4025 	} else
4026 		bdone(bp);
4027 }
4028 
4029 /*
4030  * This routine is called in lieu of iodone in the case of
4031  * incomplete I/O.  This keeps the busy status for pages
4032  * consistent.
4033  */
4034 void
4035 vfs_unbusy_pages(struct buf *bp)
4036 {
4037 	int i;
4038 	vm_object_t obj;
4039 	vm_page_t m;
4040 
4041 	runningbufwakeup(bp);
4042 	if (!(bp->b_flags & B_VMIO))
4043 		return;
4044 
4045 	obj = bp->b_bufobj->bo_object;
4046 	VM_OBJECT_WLOCK(obj);
4047 	for (i = 0; i < bp->b_npages; i++) {
4048 		m = bp->b_pages[i];
4049 		if (m == bogus_page) {
4050 			m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
4051 			if (!m)
4052 				panic("vfs_unbusy_pages: page missing\n");
4053 			bp->b_pages[i] = m;
4054 			if (buf_mapped(bp)) {
4055 				BUF_CHECK_MAPPED(bp);
4056 				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4057 				    bp->b_pages, bp->b_npages);
4058 			} else
4059 				BUF_CHECK_UNMAPPED(bp);
4060 		}
4061 		vm_page_sunbusy(m);
4062 	}
4063 	vm_object_pip_wakeupn(obj, bp->b_npages);
4064 	VM_OBJECT_WUNLOCK(obj);
4065 }
4066 
4067 /*
4068  * vfs_page_set_valid:
4069  *
4070  *	Set the valid bits in a page based on the supplied offset.   The
4071  *	range is restricted to the buffer's size.
4072  *
4073  *	This routine is typically called after a read completes.
4074  */
4075 static void
4076 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4077 {
4078 	vm_ooffset_t eoff;
4079 
4080 	/*
4081 	 * Compute the end offset, eoff, such that [off, eoff) does not span a
4082 	 * page boundary and eoff is not greater than the end of the buffer.
4083 	 * The end of the buffer, in this case, is our file EOF, not the
4084 	 * allocation size of the buffer.
4085 	 */
4086 	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
4087 	if (eoff > bp->b_offset + bp->b_bcount)
4088 		eoff = bp->b_offset + bp->b_bcount;
4089 
4090 	/*
4091 	 * Set valid range.  This is typically the entire buffer and thus the
4092 	 * entire page.
4093 	 */
4094 	if (eoff > off)
4095 		vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
4096 }
4097 
4098 /*
4099  * vfs_page_set_validclean:
4100  *
4101  *	Set the valid bits and clear the dirty bits in a page based on the
4102  *	supplied offset.   The range is restricted to the buffer's size.
4103  */
4104 static void
4105 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4106 {
4107 	vm_ooffset_t soff, eoff;
4108 
4109 	/*
4110 	 * Start and end offsets in buffer.  eoff - soff may not cross a
4111  *	page boundary or cross the end of the buffer.  The end of the
4112 	 * buffer, in this case, is our file EOF, not the allocation size
4113 	 * of the buffer.
4114 	 */
4115 	soff = off;
4116 	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4117 	if (eoff > bp->b_offset + bp->b_bcount)
4118 		eoff = bp->b_offset + bp->b_bcount;
4119 
4120 	/*
4121 	 * Set valid range.  This is typically the entire buffer and thus the
4122 	 * entire page.
4123 	 */
4124 	if (eoff > soff) {
4125 		vm_page_set_validclean(
4126 		    m,
4127 		   (vm_offset_t) (soff & PAGE_MASK),
4128 		   (vm_offset_t) (eoff - soff)
4129 		);
4130 	}
4131 }
4132 
4133 /*
4134  * Ensure that all buffer pages are not exclusive busied.  If any page is
4135  * exclusive busy, drain it.
4136  */
4137 void
4138 vfs_drain_busy_pages(struct buf *bp)
4139 {
4140 	vm_page_t m;
4141 	int i, last_busied;
4142 
4143 	VM_OBJECT_ASSERT_WLOCKED(bp->b_bufobj->bo_object);
4144 	last_busied = 0;
4145 	for (i = 0; i < bp->b_npages; i++) {
4146 		m = bp->b_pages[i];
4147 		if (vm_page_xbusied(m)) {
4148 			for (; last_busied < i; last_busied++)
4149 				vm_page_sbusy(bp->b_pages[last_busied]);
4150 			while (vm_page_xbusied(m)) {
4151 				vm_page_lock(m);
4152 				VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4153 				vm_page_busy_sleep(m, "vbpage");
4154 				VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4155 			}
4156 		}
4157 	}
4158 	for (i = 0; i < last_busied; i++)
4159 		vm_page_sunbusy(bp->b_pages[i]);
4160 }
4161 
4162 /*
4163  * This routine is called before a device strategy routine.
4164  * It is used to tell the VM system that paging I/O is in
4165  * progress, and treat the pages associated with the buffer
4166  * almost as being exclusive busy.  Also the object paging_in_progress
4167  * flag is handled to make sure that the object doesn't become
4168  * inconsistent.
4169  *
4170  * Since I/O has not been initiated yet, certain buffer flags
4171  * such as BIO_ERROR or B_INVAL may be in an inconsistent state
4172  * and should be ignored.
4173  */
4174 void
4175 vfs_busy_pages(struct buf *bp, int clear_modify)
4176 {
4177 	int i, bogus;
4178 	vm_object_t obj;
4179 	vm_ooffset_t foff;
4180 	vm_page_t m;
4181 
4182 	if (!(bp->b_flags & B_VMIO))
4183 		return;
4184 
4185 	obj = bp->b_bufobj->bo_object;
4186 	foff = bp->b_offset;
4187 	KASSERT(bp->b_offset != NOOFFSET,
4188 	    ("vfs_busy_pages: no buffer offset"));
4189 	VM_OBJECT_WLOCK(obj);
4190 	vfs_drain_busy_pages(bp);
4191 	if (bp->b_bufsize != 0)
4192 		vfs_setdirty_locked_object(bp);
4193 	bogus = 0;
4194 	for (i = 0; i < bp->b_npages; i++) {
4195 		m = bp->b_pages[i];
4196 
4197 		if ((bp->b_flags & B_CLUSTER) == 0) {
4198 			vm_object_pip_add(obj, 1);
4199 			vm_page_sbusy(m);
4200 		}
4201 		/*
4202 		 * When readying a buffer for a read ( i.e
4203 		 * clear_modify == 0 ), it is important to do
4204 		 * bogus_page replacement for valid pages in
4205 		 * partially instantiated buffers.  Partially
4206 		 * instantiated buffers can, in turn, occur when
4207 		 * reconstituting a buffer from its VM backing store
4208 		 * base.  We only have to do this if B_CACHE is
4209 		 * clear ( which causes the I/O to occur in the
4210 		 * first place ).  The replacement prevents the read
4211 		 * I/O from overwriting potentially dirty VM-backed
4212 		 * pages.  XXX bogus page replacement is, uh, bogus.
4213 		 * It may not work properly with small-block devices.
4214 		 * We need to find a better way.
4215 		 */
4216 		if (clear_modify) {
4217 			pmap_remove_write(m);
4218 			vfs_page_set_validclean(bp, foff, m);
4219 		} else if (m->valid == VM_PAGE_BITS_ALL &&
4220 		    (bp->b_flags & B_CACHE) == 0) {
4221 			bp->b_pages[i] = bogus_page;
4222 			bogus++;
4223 		}
4224 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4225 	}
4226 	VM_OBJECT_WUNLOCK(obj);
4227 	if (bogus && buf_mapped(bp)) {
4228 		BUF_CHECK_MAPPED(bp);
4229 		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4230 		    bp->b_pages, bp->b_npages);
4231 	}
4232 }
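/*
 * Illustrative sketch (not part of this file): vfs_busy_pages() is called
 * just before a VMIO buffer is handed to the driver, and
 * vfs_unbusy_pages() above undoes it if the I/O is abandoned before it
 * starts; the normal completion path instead runs through bufdone().
 * "bp" is assumed to be a locked, dirty VMIO buffer and
 * precondition_failed() is hypothetical.
 *
 *	bp->b_iocmd = BIO_WRITE;
 *	vfs_busy_pages(bp, 1);
 *	if (precondition_failed(bp)) {
 *		vfs_unbusy_pages(bp);
 *		brelse(bp);
 *	} else {
 *		bp->b_iooffset = dbtob(bp->b_blkno);
 *		bstrategy(bp);
 *	}
 */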
4233 
4234 /*
4235  *	vfs_bio_set_valid:
4236  *
4237  *	Set the range within the buffer to valid.  The range is
4238  *	relative to the beginning of the buffer, b_offset.  Note that
4239  *	b_offset itself may be offset from the beginning of the first
4240  *	page.
4241  */
4242 void
4243 vfs_bio_set_valid(struct buf *bp, int base, int size)
4244 {
4245 	int i, n;
4246 	vm_page_t m;
4247 
4248 	if (!(bp->b_flags & B_VMIO))
4249 		return;
4250 
4251 	/*
4252 	 * Fixup base to be relative to beginning of first page.
4253 	 * Set initial n to be the maximum number of bytes in the
4254 	 * first page that can be validated.
4255 	 */
4256 	base += (bp->b_offset & PAGE_MASK);
4257 	n = PAGE_SIZE - (base & PAGE_MASK);
4258 
4259 	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4260 	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4261 		m = bp->b_pages[i];
4262 		if (n > size)
4263 			n = size;
4264 		vm_page_set_valid_range(m, base & PAGE_MASK, n);
4265 		base += n;
4266 		size -= n;
4267 		n = PAGE_SIZE;
4268 	}
4269 	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4270 }
4271 
4272 /*
4273  *	vfs_bio_clrbuf:
4274  *
4275  *	If the specified buffer is a non-VMIO buffer, clear the entire
4276  *	buffer.  If the specified buffer is a VMIO buffer, clear and
4277  *	validate only the previously invalid portions of the buffer.
4278  *	This routine essentially fakes an I/O, so we need to clear
4279  *	BIO_ERROR and B_INVAL.
4280  *
4281  *	Note that while we only theoretically need to clear through b_bcount,
4282  *	we go ahead and clear through b_bufsize.
4283  */
4284 void
4285 vfs_bio_clrbuf(struct buf *bp)
4286 {
4287 	int i, j, mask, sa, ea, slide;
4288 
4289 	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
4290 		clrbuf(bp);
4291 		return;
4292 	}
4293 	bp->b_flags &= ~B_INVAL;
4294 	bp->b_ioflags &= ~BIO_ERROR;
4295 	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
4296 	if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
4297 	    (bp->b_offset & PAGE_MASK) == 0) {
4298 		if (bp->b_pages[0] == bogus_page)
4299 			goto unlock;
4300 		mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
4301 		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[0]->object);
4302 		if ((bp->b_pages[0]->valid & mask) == mask)
4303 			goto unlock;
4304 		if ((bp->b_pages[0]->valid & mask) == 0) {
4305 			pmap_zero_page_area(bp->b_pages[0], 0, bp->b_bufsize);
4306 			bp->b_pages[0]->valid |= mask;
4307 			goto unlock;
4308 		}
4309 	}
4310 	sa = bp->b_offset & PAGE_MASK;
4311 	slide = 0;
4312 	for (i = 0; i < bp->b_npages; i++, sa = 0) {
4313 		slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
4314 		ea = slide & PAGE_MASK;
4315 		if (ea == 0)
4316 			ea = PAGE_SIZE;
4317 		if (bp->b_pages[i] == bogus_page)
4318 			continue;
4319 		j = sa / DEV_BSIZE;
4320 		mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
4321 		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[i]->object);
4322 		if ((bp->b_pages[i]->valid & mask) == mask)
4323 			continue;
4324 		if ((bp->b_pages[i]->valid & mask) == 0)
4325 			pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
4326 		else {
4327 			for (; sa < ea; sa += DEV_BSIZE, j++) {
4328 				if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
4329 					pmap_zero_page_area(bp->b_pages[i],
4330 					    sa, DEV_BSIZE);
4331 				}
4332 			}
4333 		}
4334 		bp->b_pages[i]->valid |= mask;
4335 	}
4336 unlock:
4337 	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
4338 	bp->b_resid = 0;
4339 }
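/*
 * Illustrative sketch (not part of this file): a filesystem block
 * allocation path typically calls vfs_bio_clrbuf() on a freshly allocated
 * block so that only the portions not already valid in the VM backing
 * store are zeroed, in the style of the FFS balloc routines.  "vp",
 * "lbn", "bsize" and "dblkno" are assumed to be supplied by the caller.
 *
 *	struct buf *bp;
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0, 0);
 *	bp->b_blkno = dblkno;
 *	vfs_bio_clrbuf(bp);
 *	bdwrite(bp);
 */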
4340 
4341 void
4342 vfs_bio_bzero_buf(struct buf *bp, int base, int size)
4343 {
4344 	vm_page_t m;
4345 	int i, n;
4346 
4347 	if (buf_mapped(bp)) {
4348 		BUF_CHECK_MAPPED(bp);
4349 		bzero(bp->b_data + base, size);
4350 	} else {
4351 		BUF_CHECK_UNMAPPED(bp);
4352 		n = PAGE_SIZE - (base & PAGE_MASK);
4353 		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4354 			m = bp->b_pages[i];
4355 			if (n > size)
4356 				n = size;
4357 			pmap_zero_page_area(m, base & PAGE_MASK, n);
4358 			base += n;
4359 			size -= n;
4360 			n = PAGE_SIZE;
4361 		}
4362 	}
4363 }
4364 
4365 /*
4366  * vm_hold_load_pages and vm_hold_free_pages get pages into
4367  * a buffers address space.  The pages are anonymous and are
4368  * a buffer's address space.  The pages are anonymous and are
4369  */
4370 static void
4371 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4372 {
4373 	vm_offset_t pg;
4374 	vm_page_t p;
4375 	int index;
4376 
4377 	BUF_CHECK_MAPPED(bp);
4378 
4379 	to = round_page(to);
4380 	from = round_page(from);
4381 	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4382 
4383 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
4384 tryagain:
4385 		/*
4386 		 * note: must allocate system pages since blocking here
4387 		 * could interfere with paging I/O, no matter which
4388 		 * process we are.
4389 		 */
4390 		p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
4391 		    VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
4392 		if (p == NULL) {
4393 			VM_WAIT;
4394 			goto tryagain;
4395 		}
4396 		pmap_qenter(pg, &p, 1);
4397 		bp->b_pages[index] = p;
4398 	}
4399 	bp->b_npages = index;
4400 }
4401 
4402 /* Return pages associated with this buf to the vm system */
4403 static void
4404 vm_hold_free_pages(struct buf *bp, int newbsize)
4405 {
4406 	vm_offset_t from;
4407 	vm_page_t p;
4408 	int index, newnpages;
4409 
4410 	BUF_CHECK_MAPPED(bp);
4411 
4412 	from = round_page((vm_offset_t)bp->b_data + newbsize);
4413 	newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4414 	if (bp->b_npages > newnpages)
4415 		pmap_qremove(from, bp->b_npages - newnpages);
4416 	for (index = newnpages; index < bp->b_npages; index++) {
4417 		p = bp->b_pages[index];
4418 		bp->b_pages[index] = NULL;
4419 		if (vm_page_sbusied(p))
4420 			printf("vm_hold_free_pages: blkno: %jd, lblkno: %jd\n",
4421 			    (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno);
4422 		p->wire_count--;
4423 		vm_page_free(p);
4424 		atomic_subtract_int(&vm_cnt.v_wire_count, 1);
4425 	}
4426 	bp->b_npages = newnpages;
4427 }
4428 
4429 /*
4430  * Map an IO request into kernel virtual address space.
4431  *
4432  * All requests are (re)mapped into kernel VA space.
4433  * Notice that we use b_bufsize for the size of the buffer
4434  * to be mapped.  b_bcount might be modified by the driver.
4435  *
4436  * Note that even if the caller determines that the address space should
4437  * be valid, a race or a smaller file mapped into a larger space may
4438  * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
4439  * check the return value.
4440  *
4441  * This function only works with pager buffers.
4442  */
4443 int
4444 vmapbuf(struct buf *bp, int mapbuf)
4445 {
4446 	vm_prot_t prot;
4447 	int pidx;
4448 
4449 	if (bp->b_bufsize < 0)
4450 		return (-1);
4451 	prot = VM_PROT_READ;
4452 	if (bp->b_iocmd == BIO_READ)
4453 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
4454 	if ((pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
4455 	    (vm_offset_t)bp->b_data, bp->b_bufsize, prot, bp->b_pages,
4456 	    btoc(MAXPHYS))) < 0)
4457 		return (-1);
4458 	bp->b_npages = pidx;
4459 	bp->b_offset = ((vm_offset_t)bp->b_data) & PAGE_MASK;
4460 	if (mapbuf || !unmapped_buf_allowed) {
4461 		pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
4462 		bp->b_data = bp->b_kvabase + bp->b_offset;
4463 	} else
4464 		bp->b_data = unmapped_buf;
4465 	return(0);
4466 }
4467 
4468 /*
4469  * Free the io map PTEs associated with this IO operation.
4470  * We also invalidate the TLB entries and restore the original b_addr.
4471  *
4472  * This function only works with pager buffers.
4473  */
4474 void
4475 vunmapbuf(struct buf *bp)
4476 {
4477 	int npages;
4478 
4479 	npages = bp->b_npages;
4480 	if (buf_mapped(bp))
4481 		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
4482 	vm_page_unhold_pages(bp->b_pages, npages);
4483 
4484 	bp->b_data = unmapped_buf;
4485 }
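/*
 * Illustrative sketch (not part of this file): a driver that must reach a
 * user buffer from kernel context can wire and map it with vmapbuf() on a
 * pager buffer and undo the work with vunmapbuf() when the transfer is
 * over.  "udata", "ulen" and the transfer step itself are hypothetical.
 *
 *	struct buf *bp;
 *
 *	bp = getpbuf(NULL);
 *	bp->b_data = udata;
 *	bp->b_bufsize = ulen;
 *	bp->b_iocmd = BIO_READ;
 *	if (vmapbuf(bp, 1) < 0) {
 *		relpbuf(bp, NULL);
 *		return (EFAULT);
 *	}
 *	(perform the transfer using bp->b_data or bp->b_pages)
 *	vunmapbuf(bp);
 *	relpbuf(bp, NULL);
 */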
4486 
4487 void
4488 bdone(struct buf *bp)
4489 {
4490 	struct mtx *mtxp;
4491 
4492 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4493 	mtx_lock(mtxp);
4494 	bp->b_flags |= B_DONE;
4495 	wakeup(bp);
4496 	mtx_unlock(mtxp);
4497 }
4498 
4499 void
4500 bwait(struct buf *bp, u_char pri, const char *wchan)
4501 {
4502 	struct mtx *mtxp;
4503 
4504 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4505 	mtx_lock(mtxp);
4506 	while ((bp->b_flags & B_DONE) == 0)
4507 		msleep(bp, mtxp, pri, wchan, 0);
4508 	mtx_unlock(mtxp);
4509 }
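/*
 * Illustrative sketch (not part of this file): bdone() and bwait()
 * implement the minimal B_DONE handshake that bufdone_finish() and
 * bufwait() build on.  One way to use the primitives directly (an
 * assumption of this sketch, not a pattern taken from this file) is to
 * install bdone as the b_iodone handler, so bufdone() only posts B_DONE
 * and the initiating thread keeps responsibility for cleaning up the
 * buffer itself.
 *
 *	bp->b_iodone = bdone;
 *	bp->b_iooffset = dbtob(bp->b_blkno);
 *	bstrategy(bp);
 *	bwait(bp, PRIBIO, "examplewait");
 *	(caller performs its own cleanup of bp here)
 */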
4510 
4511 int
4512 bufsync(struct bufobj *bo, int waitfor)
4513 {
4514 
4515 	return (VOP_FSYNC(bo->__bo_vnode, waitfor, curthread));
4516 }
4517 
4518 void
4519 bufstrategy(struct bufobj *bo, struct buf *bp)
4520 {
4521 	int i = 0;
4522 	struct vnode *vp;
4523 
4524 	vp = bp->b_vp;
4525 	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
4526 	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
4527 	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
4528 	i = VOP_STRATEGY(vp, bp);
4529 	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
4530 }
4531 
4532 void
4533 bufobj_wrefl(struct bufobj *bo)
4534 {
4535 
4536 	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
4537 	ASSERT_BO_WLOCKED(bo);
4538 	bo->bo_numoutput++;
4539 }
4540 
4541 void
4542 bufobj_wref(struct bufobj *bo)
4543 {
4544 
4545 	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
4546 	BO_LOCK(bo);
4547 	bo->bo_numoutput++;
4548 	BO_UNLOCK(bo);
4549 }
4550 
4551 void
4552 bufobj_wdrop(struct bufobj *bo)
4553 {
4554 
4555 	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
4556 	BO_LOCK(bo);
4557 	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
4558 	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
4559 		bo->bo_flag &= ~BO_WWAIT;
4560 		wakeup(&bo->bo_numoutput);
4561 	}
4562 	BO_UNLOCK(bo);
4563 }
4564 
4565 int
4566 bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
4567 {
4568 	int error;
4569 
4570 	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
4571 	ASSERT_BO_WLOCKED(bo);
4572 	error = 0;
4573 	while (bo->bo_numoutput) {
4574 		bo->bo_flag |= BO_WWAIT;
4575 		error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo),
4576 		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
4577 		if (error)
4578 			break;
4579 	}
4580 	return (error);
4581 }
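/*
 * Illustrative sketch (not part of this file): a fsync-style caller
 * drains outstanding writes on a buffer object by sleeping in
 * bufobj_wwait() with the bufobj lock held, as VOP_FSYNC implementations
 * do after issuing their writes.
 *
 *	BO_LOCK(bo);
 *	error = bufobj_wwait(bo, 0, 0);
 *	BO_UNLOCK(bo);
 */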
4582 
4583 void
4584 bpin(struct buf *bp)
4585 {
4586 	struct mtx *mtxp;
4587 
4588 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4589 	mtx_lock(mtxp);
4590 	bp->b_pin_count++;
4591 	mtx_unlock(mtxp);
4592 }
4593 
4594 void
4595 bunpin(struct buf *bp)
4596 {
4597 	struct mtx *mtxp;
4598 
4599 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4600 	mtx_lock(mtxp);
4601 	if (--bp->b_pin_count == 0)
4602 		wakeup(bp);
4603 	mtx_unlock(mtxp);
4604 }
4605 
4606 void
4607 bunpin_wait(struct buf *bp)
4608 {
4609 	struct mtx *mtxp;
4610 
4611 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4612 	mtx_lock(mtxp);
4613 	while (bp->b_pin_count > 0)
4614 		msleep(bp, mtxp, PRIBIO, "bwunpin", 0);
4615 	mtx_unlock(mtxp);
4616 }
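/*
 * Illustrative sketch (not part of this file): the pin count is a simple
 * reference that other paths honour before rewriting or invalidating a
 * buffer; getblk() above either fails with GB_LOCK_NOWAIT or sleeps in
 * bunpin_wait() when it finds a pinned delayed-write buffer.  A pinning
 * consumer brackets the work that must finish first:
 *
 *	bpin(bp);
 *	(start work that must complete before bp may be rewritten)
 *	bunpin(bp);
 */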
4617 
4618 /*
4619  * Set bio_data or bio_ma for struct bio from the struct buf.
4620  */
4621 void
4622 bdata2bio(struct buf *bp, struct bio *bip)
4623 {
4624 
4625 	if (!buf_mapped(bp)) {
4626 		KASSERT(unmapped_buf_allowed, ("unmapped"));
4627 		bip->bio_ma = bp->b_pages;
4628 		bip->bio_ma_n = bp->b_npages;
4629 		bip->bio_data = unmapped_buf;
4630 		bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
4631 		bip->bio_flags |= BIO_UNMAPPED;
4632 		KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
4633 		    PAGE_SIZE == bp->b_npages,
4634 		    ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset,
4635 		    (long long)bip->bio_length, bip->bio_ma_n));
4636 	} else {
4637 		bip->bio_data = bp->b_data;
4638 		bip->bio_ma = NULL;
4639 	}
4640 }
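/*
 * Illustrative sketch (not part of this file): a strategy routine that
 * forwards a struct buf to GEOM fills in the bio and lets bdata2bio()
 * choose between the mapped and unmapped representations, in the style
 * of g_vfs_strategy().  The consumer "cp" and example_done() are assumed
 * to exist in the caller.
 *
 *	struct bio *bip;
 *
 *	bip = g_alloc_bio();
 *	bip->bio_cmd = bp->b_iocmd;
 *	bip->bio_offset = bp->b_iooffset;
 *	bip->bio_length = bp->b_bcount;
 *	bdata2bio(bp, bip);
 *	bip->bio_done = example_done;
 *	bip->bio_caller2 = bp;
 *	g_io_request(bip, cp);
 */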
4641 
4642 #include "opt_ddb.h"
4643 #ifdef DDB
4644 #include <ddb/ddb.h>
4645 
4646 /* DDB command to show buffer data */
4647 DB_SHOW_COMMAND(buffer, db_show_buffer)
4648 {
4649 	/* get args */
4650 	struct buf *bp = (struct buf *)addr;
4651 
4652 	if (!have_addr) {
4653 		db_printf("usage: show buffer <addr>\n");
4654 		return;
4655 	}
4656 
4657 	db_printf("buf at %p\n", bp);
4658 	db_printf("b_flags = 0x%b, b_xflags=0x%b, b_vflags=0x%b\n",
4659 	    (u_int)bp->b_flags, PRINT_BUF_FLAGS, (u_int)bp->b_xflags,
4660 	    PRINT_BUF_XFLAGS, (u_int)bp->b_vflags, PRINT_BUF_VFLAGS);
4661 	db_printf(
4662 	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
4663 	    "b_bufobj = (%p), b_data = %p, b_blkno = %jd, b_lblkno = %jd, "
4664 	    "b_dep = %p\n",
4665 	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
4666 	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
4667 	    (intmax_t)bp->b_lblkno, bp->b_dep.lh_first);
4668 	db_printf("b_kvabase = %p, b_kvasize = %d\n",
4669 	    bp->b_kvabase, bp->b_kvasize);
4670 	if (bp->b_npages) {
4671 		int i;
4672 		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
4673 		for (i = 0; i < bp->b_npages; i++) {
4674 			vm_page_t m;
4675 			m = bp->b_pages[i];
4676 			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
4677 			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
4678 			if ((i + 1) < bp->b_npages)
4679 				db_printf(",");
4680 		}
4681 		db_printf("\n");
4682 	}
4683 	db_printf(" ");
4684 	BUF_LOCKPRINTINFO(bp);
4685 }
4686 
4687 DB_SHOW_COMMAND(lockedbufs, lockedbufs)
4688 {
4689 	struct buf *bp;
4690 	int i;
4691 
4692 	for (i = 0; i < nbuf; i++) {
4693 		bp = &buf[i];
4694 		if (BUF_ISLOCKED(bp)) {
4695 			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4696 			db_printf("\n");
4697 		}
4698 	}
4699 }
4700 
4701 DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
4702 {
4703 	struct vnode *vp;
4704 	struct buf *bp;
4705 
4706 	if (!have_addr) {
4707 		db_printf("usage: show vnodebufs <addr>\n");
4708 		return;
4709 	}
4710 	vp = (struct vnode *)addr;
4711 	db_printf("Clean buffers:\n");
4712 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
4713 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4714 		db_printf("\n");
4715 	}
4716 	db_printf("Dirty buffers:\n");
4717 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
4718 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
4719 		db_printf("\n");
4720 	}
4721 }
4722 
4723 DB_COMMAND(countfreebufs, db_coundfreebufs)
4724 {
4725 	struct buf *bp;
4726 	int i, used = 0, nfree = 0;
4727 
4728 	if (have_addr) {
4729 		db_printf("usage: countfreebufs\n");
4730 		return;
4731 	}
4732 
4733 	for (i = 0; i < nbuf; i++) {
4734 		bp = &buf[i];
4735 		if (bp->b_qindex == QUEUE_EMPTY)
4736 			nfree++;
4737 		else
4738 			used++;
4739 	}
4740 
4741 	db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
4742 	    nfree + used);
4743 	db_printf("numfreebuffers is %d\n", numfreebuffers);
4744 }
4745 #endif /* DDB */
4746