/*
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *		John S. Dyson.
 *
 * $FreeBSD$
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * was provided by David Greenman, also of the FreeBSD core team.
 *
 * See buf(9) for more information.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

struct	bio_ops bioops;		/* I/O operation notification */

struct	buf_ops buf_ops_bio = {
	"buf_ops_bio",
	bwrite
};

/*
 * XXX buf is global because kern_shutdown.c and ffs_checkoverlap have
 * carnal knowledge of buffers.  This knowledge should be moved to vfs_bio.c.
 */
struct buf *buf;		/* buffer header pool */
struct mtx buftimelock;		/* Interlock on setting prio and timo */

static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
			       int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void vfs_backgroundwritedone(struct buf *bp);
static int flushbufqueues(void);
static void buf_daemon(void);

int vmiodirenable = TRUE;
SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
    "Use the VM system for directory writes");
int runningbufspace;
SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
    "Amount of presently outstanding async buffer io");
static int bufspace;
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, &bufspace, 0,
    "KVA memory used for bufs");
static int maxbufspace;
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RD, &maxbufspace, 0,
    "Maximum allowed value of bufspace (including buf_daemon)");
static int bufmallocspace;
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
    "Amount of malloced memory for buffers");
static int maxbufmallocspace;
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace, 0,
    "Maximum amount of malloced memory for buffers");
static int lobufspace;
SYSCTL_INT(_vfs, OID_AUTO, lobufspace, CTLFLAG_RD, &lobufspace, 0,
    "Minimum amount of buffers we want to have");
static int hibufspace;
SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD, &hibufspace, 0,
    "Maximum allowed value of bufspace (excluding buf_daemon)");
static int bufreusecnt;
SYSCTL_INT(_vfs, OID_AUTO, bufreusecnt, CTLFLAG_RW, &bufreusecnt, 0,
    "Number of times we have reused a buffer");
static int buffreekvacnt;
SYSCTL_INT(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt, 0,
    "Number of times we have freed the KVA space from some buffer");
static int bufdefragcnt;
SYSCTL_INT(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt, 0,
    "Number of times we have had to repeat buffer allocation to defragment");
static int lorunningspace;
SYSCTL_INT(_vfs, OID_AUTO, lorunningspace, CTLFLAG_RW, &lorunningspace, 0,
    "Minimum preferred space used for in-progress I/O");
static int hirunningspace;
SYSCTL_INT(_vfs, OID_AUTO, hirunningspace, CTLFLAG_RW, &hirunningspace, 0,
    "Maximum amount of space to use for in-progress I/O");
static int numdirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, &numdirtybuffers, 0,
    "Number of buffers that are dirty (have unwritten changes) at the moment");
static int lodirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, &lodirtybuffers, 0,
    "How many buffers we want to have free before bufdaemon can sleep");
static int hidirtybuffers;
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, &hidirtybuffers, 0,
    "When the number of dirty buffers is considered severe");
static int numfreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
    "Number of free buffers");
static int lofreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, &lofreebuffers, 0,
   "XXX Unused");
static int hifreebuffers;
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, &hifreebuffers, 0,
   "XXX Complicatedly unused");
static int getnewbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW, &getnewbufcalls, 0,
   "Number of calls to getnewbuf");
static int getnewbufrestarts;
SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW, &getnewbufrestarts, 0,
    "Number of times getnewbuf has had to restart a buffer acquisition");
static int dobkgrdwrite = 1;
SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
    "Do background writes (honoring the BX_BKGRDWRITE flag)?");

/*
 * Wakeup point for bufdaemon, as well as indicator of whether it is already
 * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
 * is idling.
 */
static int bd_request;

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;

/*
 * Offset for bogus_page.
 * XXX bogus_offset should be local to bufinit
 */
static vm_offset_t bogus_offset;

/*
 * Synchronization (sleep/wakeup) variable for active buffer space requests.
 * Set when wait starts, cleared prior to wakeup().
 * Used in runningbufwakeup() and waitrunningbufspace().
 */
static int runningbufreq;

/*
 * Synchronization (sleep/wakeup) variable for buffer requests.
 * Can contain the VFS_BIO_NEED flags defined below; setting/clearing is done
 * by and/or.
 * Used in numdirtywakeup(), bufspacewakeup(), bufcountwakeup(), bwillwrite(),
 * getnewbuf(), and getblk().
 */
static int needsbuffer;

/*
 * Mask for index into the buffer hash table, which needs to be a power of 2
 * in size.  Set in kern_vfs_bio_buffer_alloc.
 */
static int bufhashmask;

/*
 * Hash table for all buffers, with a linked list hanging from each table
 * entry.  Set in kern_vfs_bio_buffer_alloc, initialized in buf_init.
 */
static LIST_HEAD(bufhashhdr, buf) *bufhashtbl;

/*
 * Somewhere to store buffers when they are not in another list, so that
 * every buffer is always on some list (and the same set of list operations
 * can be used on all of them).
 */
static struct bufhashhdr invalhash;

/*
 * Definitions for the buffer free lists.
 */
#define BUFFER_QUEUES	6	/* number of free buffer queues */

#define QUEUE_NONE	0	/* on no queue */
#define QUEUE_LOCKED	1	/* locked buffers */
#define QUEUE_CLEAN	2	/* non-B_DELWRI buffers */
#define QUEUE_DIRTY	3	/* B_DELWRI buffers */
#define QUEUE_EMPTYKVA	4	/* empty buffer headers w/KVA assignment */
#define QUEUE_EMPTY	5	/* empty buffer headers */

/* Queues for free buffers with various properties */
static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES] = { { 0 } };
/*
 * Single global constant for BUF_WMESG, to avoid getting multiple references.
 * buf_wmesg is referenced from macros.
 */
const char *buf_wmesg = BUF_WMESG;

#define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
#define VFS_BIO_NEED_DIRTYFLUSH	0x02	/* waiting for dirty buffer flush */
#define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
#define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */
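
/*
 * Editorial note on the needsbuffer handshake (a summary of the code below,
 * not original text): a would-be allocator such as getnewbuf() or
 * bwillwrite() ORs one of the VFS_BIO_NEED flags into needsbuffer and
 * tsleep()s on &needsbuffer; the release side (bufspacewakeup(),
 * bufcountwakeup(), numdirtywakeup()) clears the flag and wakeup()s the same
 * address.  In this era the test-and-sleep pair is serialized by
 * splbio()/Giant.
 */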

/*
 * Buffer hash table code.  Note that the logical block scans linearly, which
 * gives us some L1 cache locality.
 */

static __inline
struct bufhashhdr *
bufhash(struct vnode *vnp, daddr_t bn)
{
	return(&bufhashtbl[(((uintptr_t)(vnp) >> 7) + (int)bn) & bufhashmask]);
}
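
/*
 * Illustrative note: the hash mixes the vnode pointer (shifted right to
 * drop allocation-alignment zeroes) with the logical block number, so
 * blocks bn and bn+1 of the same vnode land in adjacent hash chains; that
 * is the linear scan / L1 locality referred to above.
 */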

/*
 *	numdirtywakeup:
 *
 *	If someone is blocked due to there being too many dirty buffers,
 *	and numdirtybuffers is now reasonable, wake them up.
 */

static __inline void
numdirtywakeup(int level)
{
	if (numdirtybuffers <= level) {
		if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
			needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
			wakeup(&needsbuffer);
		}
	}
}

/*
 *	bufspacewakeup:
 *
 *	Called when buffer space is potentially available for recovery.
 *	getnewbuf() will block on this flag when it is unable to free
 *	sufficient buffer space.  Buffer space becomes recoverable when
 *	bp's get placed back in the queues.
 */

static __inline void
bufspacewakeup(void)
{
	/*
	 * If someone is waiting for BUF space, wake them up.  Even
	 * though we haven't freed the kva space yet, the waiting
	 * process will be able to now.
	 */
	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
		wakeup(&needsbuffer);
	}
}

/*
 * runningbufwakeup() - in-progress I/O accounting.
 */
static __inline void
runningbufwakeup(struct buf *bp)
{
	if (bp->b_runningbufspace) {
		runningbufspace -= bp->b_runningbufspace;
		bp->b_runningbufspace = 0;
		if (runningbufreq && runningbufspace <= lorunningspace) {
			runningbufreq = 0;
			wakeup(&runningbufreq);
		}
	}
}
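
/*
 * Editorial note: the b_runningbufspace credit returned above is charged
 * on the output side -- bwrite() below sets bp->b_runningbufspace and adds
 * it to runningbufspace just before issuing the I/O -- so each in-flight
 * write is accounted for exactly once.
 */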

/*
 *	bufcountwakeup:
 *
 *	Called when a buffer has been added to one of the free queues to
 *	account for the buffer and to wakeup anyone waiting for free buffers.
 *	This typically occurs when large amounts of metadata are being handled
 *	by the buffer cache ( else buffer space runs out first, usually ).
 */

static __inline void
bufcountwakeup(void)
{
	++numfreebuffers;
	if (needsbuffer) {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
		if (numfreebuffers >= hifreebuffers)
			needsbuffer &= ~VFS_BIO_NEED_FREE;
		wakeup(&needsbuffer);
	}
}

/*
 *	waitrunningbufspace()
 *
 *	runningbufspace is a measure of the amount of I/O currently
 *	running.  This routine is used in async-write situations to
 *	prevent creating huge backups of pending writes to a device.
 *	Only asynchronous writes are governed by this function.
 *
 *	Reads will adjust runningbufspace, but will not block based on it.
 *	The read load has a side effect of reducing the allowed write load.
 *
 *	This does NOT turn an async write into a sync write.  It waits
 *	for earlier writes to complete and generally returns before the
 *	caller's write has reached the device.
 */
static __inline void
waitrunningbufspace(void)
{
	/*
	 * XXX race against wakeup interrupt, currently
	 * protected by Giant.  FIXME!
	 */
	while (runningbufspace > hirunningspace) {
		++runningbufreq;
		tsleep(&runningbufreq, PVM, "wdrain", 0);
	}
}


/*
 *	vfs_buf_test_cache:
 *
 *	Called when a buffer is extended.  This function clears the B_CACHE
 *	bit if the newly extended portion of the buffer does not contain
 *	valid data.
 */
static __inline__
void
vfs_buf_test_cache(struct buf *bp,
		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
		  vm_page_t m)
{
	GIANT_REQUIRED;

	if (bp->b_flags & B_CACHE) {
		int base = (foff + off) & PAGE_MASK;
		if (vm_page_is_valid(m, base, size) == 0)
			bp->b_flags &= ~B_CACHE;
	}
}

/* Wake up the buffer daemon if necessary */
static __inline__
void
bd_wakeup(int dirtybuflevel)
{
	if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) {
		bd_request = 1;
		wakeup(&bd_request);
	}
}

/*
 * bd_speedup - speedup the buffer cache flushing code
 */

static __inline__
void
bd_speedup(void)
{
	bd_wakeup(1);
}

/*
 * Calculate buffer cache scaling values and reserve space for buffer
 * headers.  This is called during low level kernel initialization and
 * may be called more than once.  We CANNOT write to the memory area
 * being reserved at this time.
 */
caddr_t
kern_vfs_bio_buffer_alloc(caddr_t v, int physmem_est)
{
	/*
	 * physmem_est is in pages.  Convert it to kilobytes (assumes
	 * PAGE_SIZE is >= 1K)
	 */
	physmem_est = physmem_est * (PAGE_SIZE / 1024);

	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
	 * buffers to cover 1/20 of our ram over 64MB.  When auto-sizing
	 * the buffer cache we limit the eventual kva reservation to
	 * maxbcache bytes.
	 *
	 * factor represents the 1/4 x ram conversion.
	 */
	if (nbuf == 0) {
		int factor = 4 * BKVASIZE / 1024;

		nbuf = 50;
		if (physmem_est > 4096)
			nbuf += min((physmem_est - 4096) / factor,
			    65536 / factor);
		if (physmem_est > 65536)
			nbuf += (physmem_est - 65536) * 2 / (factor * 5);

		if (maxbcache && nbuf > maxbcache / BKVASIZE)
			nbuf = maxbcache / BKVASIZE;
	}
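
	/*
	 * Illustrative sizing example (not from the original source):
	 * assuming BKVASIZE is 16K, factor is 64.  With 128MB of RAM,
	 * physmem_est is 131072 (KB), so
	 *	nbuf = 50 + min((131072 - 4096) / 64, 65536 / 64)  = 50 + 1024
	 *	nbuf += (131072 - 65536) * 2 / (64 * 5)            = + 409
	 * for roughly 1483 buffers before any maxbcache clamp.
	 */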

#if 0
	/*
	 * Do not allow the buffer_map to be more than 1/2 the size of the
	 * kernel_map.
	 */
	if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
	    (BKVASIZE * 2)) {
		nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
		    (BKVASIZE * 2);
		printf("Warning: nbufs capped at %d\n", nbuf);
	}
#endif

	/*
	 * swbufs are used as temporary holders for I/O, such as paging I/O.
	 * We have no fewer than 16 and no more than 256.
	 */
	nswbuf = max(min(nbuf/4, 256), 16);

	/*
	 * Reserve space for the buffer cache buffers
	 */
	swbuf = (void *)v;
	v = (caddr_t)(swbuf + nswbuf);
	buf = (void *)v;
	v = (caddr_t)(buf + nbuf);

	/*
	 * Calculate the hash table size and reserve space
	 */
	for (bufhashmask = 8; bufhashmask < nbuf / 4; bufhashmask <<= 1)
		;
	bufhashtbl = (void *)v;
	v = (caddr_t)(bufhashtbl + bufhashmask);
	--bufhashmask;

	return(v);
}
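
/*
 * Continuing the illustrative example above: with nbuf around 1483 the
 * hash loop doubles bufhashmask 8, 16, ... 512 (the first power of 2 that
 * is >= nbuf / 4 = 370), then decrements it to 511 so it can be used
 * directly as an AND mask in bufhash().
 */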

/* Initialize the buffer subsystem.  Called before use of any buffers. */
void
bufinit(void)
{
	struct buf *bp;
	int i;

	GIANT_REQUIRED;

	LIST_INIT(&invalhash);
	mtx_init(&buftimelock, "buftime lock", NULL, MTX_DEF);

	for (i = 0; i <= bufhashmask; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_xflags = 0;
		LIST_INIT(&bp->b_dep);
		BUF_LOCKINIT(bp);
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}

	/*
	 * maxbufspace is the absolute maximum amount of buffer space we are
	 * allowed to reserve in KVM and in real terms.  The absolute maximum
	 * is nominally used by buf_daemon.  hibufspace is the nominal maximum
	 * used by most other processes.  The differential is required to
	 * ensure that buf_daemon is able to run when other processes might
	 * be blocked waiting for buffer space.
	 *
	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
	 * this may result in KVM fragmentation which is not handled optimally
	 * by the system.
	 */
	maxbufspace = nbuf * BKVASIZE;
	hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 10);
	lobufspace = hibufspace - MAXBSIZE;
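
	/*
	 * Illustrative example (assuming BKVASIZE 16K, MAXBSIZE 64K and the
	 * nbuf of ~1483 computed above): maxbufspace is about 23MB,
	 * hibufspace sits 10 * MAXBSIZE (640K) below it, and lobufspace one
	 * MAXBSIZE below that.  The maxbufspace/hibufspace gap is the
	 * reserve that lets buf_daemon keep running when ordinary processes
	 * are blocked waiting for buffer space.
	 */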

	lorunningspace = 512 * 1024;
	hirunningspace = 1024 * 1024;

/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = hibufspace / 20;

/*
 * Reduce the chance of a deadlock occurring by limiting the number
 * of delayed-write dirty buffers we allow to stack up.
 */
	hidirtybuffers = nbuf / 4 + 20;
	numdirtybuffers = 0;
/*
 * To support extreme low-memory systems, make sure hidirtybuffers cannot
 * eat up all available buffer space.  This occurs when our minimum cannot
 * be met.  We try to size hidirtybuffers to 3/4 our buffer space assuming
 * BKVASIZE'd (8K) buffers.
 */
	while (hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
		hidirtybuffers >>= 1;
	}
	lodirtybuffers = hidirtybuffers / 2;
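
	/*
	 * Illustrative example: with nbuf ~1483, hidirtybuffers starts at
	 * ~390; 390 * BKVASIZE is far below 3/4 of the example hibufspace,
	 * so the loop above does not halve it and lodirtybuffers becomes
	 * ~195.  The halving only matters on very small memory machines.
	 */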

/*
 * Try to keep the number of free buffers in the specified range,
 * and give special processes (e.g. like buf_daemon) access to an
 * emergency reserve.
 */
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;

/*
 * Maximum number of async ops initiated per buf_daemon loop.  This is
 * somewhat of a hack at the moment, we really need to limit ourselves
 * based on the number of bytes of I/O in-transit that were initiated
 * from buf_daemon.
 */

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);
	cnt.v_wire_count++;
}

/*
 * bfreekva() - free the kva allocation for a buffer.
 *
 *	Must be called at splbio() or higher as this is the only locking for
 *	buffer_map.
 *
 *	Since this call frees up buffer space, we call bufspacewakeup().
 */
static void
bfreekva(struct buf * bp)
{
	GIANT_REQUIRED;

	if (bp->b_kvasize) {
		++buffreekvacnt;
		bufspace -= bp->b_kvasize;
		vm_map_delete(buffer_map,
		    (vm_offset_t) bp->b_kvabase,
		    (vm_offset_t) bp->b_kvabase + bp->b_kvasize
		);
		bp->b_kvasize = 0;
		bufspacewakeup();
	}
}

/*
 *	bremfree:
 *
 *	Remove the buffer from the appropriate free list.
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();
	int old_qindex = bp->b_qindex;

	GIANT_REQUIRED;

	if (bp->b_qindex != QUEUE_NONE) {
		KASSERT(BUF_REFCNT(bp) == 1, ("bremfree: bp %p not locked",bp));
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		if (BUF_REFCNT(bp) <= 1)
			panic("bremfree: removing a buffer not on a queue");
	}

	/*
	 * Fixup numfreebuffers count.  If the buffer is invalid or not
	 * delayed-write, and it was on the DIRTY, CLEAN, EMPTY, or EMPTYKVA
	 * queues, the buffer was free and we must decrement numfreebuffers.
	 */
	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
		switch(old_qindex) {
		case QUEUE_DIRTY:
		case QUEUE_CLEAN:
		case QUEUE_EMPTY:
		case QUEUE_EMPTYKVA:
			--numfreebuffers;
			break;
		default:
			break;
		}
	}
	splx(s);
}


/*
 * Get a buffer with the specified data.  Look in the cache first.  We
 * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
 * is set, the buffer is valid and we do not have to do anything ( see
 * getblk() ).  This is really just a special case of breadn().
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{

	return (breadn(vp, blkno, size, 0, 0, 0, cred, bpp));
}
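
/*
 * Illustrative consumer sketch (modeled on filesystem read paths such as
 * ffs_read(); the local names are examples, not part of this file):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	bcopy((char *)bp->b_data + off, dst, n);
 *	bqrelse(bp);
 *
 * i.e. brelse() to toss the buffer on error, bqrelse() to requeue it when
 * the data may be wanted again soon.
 */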

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.  We must clear BIO_ERROR and B_INVAL prior
 * to initiating I/O.  If B_CACHE is set, the buffer is valid
 * and we do not have to do anything.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curthread != PCPU_GET(idlethread))
			curthread->td_proc->p_stats->p_ru.ru_inblock++;
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if (bp->b_rcred == NOCRED && cred != NOCRED)
			bp->b_rcred = crhold(cred);
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		++readwait;
	}

	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curthread != PCPU_GET(idlethread))
				curthread->td_proc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_ASYNC;
			rabp->b_flags &= ~B_INVAL;
			rabp->b_ioflags &= ~BIO_ERROR;
			rabp->b_iocmd = BIO_READ;
			if (rabp->b_rcred == NOCRED && cred != NOCRED)
				rabp->b_rcred = crhold(cred);
			vfs_busy_pages(rabp, 0);
			BUF_KERNPROC(rabp);
			VOP_STRATEGY(vp, rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = bufwait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async).  Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that buffer is
 * fully valid and thus cacheable.  This is true even of NFS
 * now so we set it generally.  This could be set either here
 * or in biodone() since the I/O is synchronous.  We put it
 * here.
 */

int
bwrite(struct buf * bp)
{
	int oldflags, s;
	struct buf *newbp;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	oldflags = bp->b_flags;

	if (BUF_REFCNT(bp) == 0)
		panic("bwrite: buffer is not busy???");
	s = splbio();
	/*
	 * If a background write is already in progress, delay
	 * writing this block if it is asynchronous. Otherwise
	 * wait for the background write to complete.
	 */
	if (bp->b_xflags & BX_BKGRDINPROG) {
		if (bp->b_flags & B_ASYNC) {
			splx(s);
			bdwrite(bp);
			return (0);
		}
		bp->b_xflags |= BX_BKGRDWAIT;
		tsleep(&bp->b_xflags, PRIBIO, "biord", 0);
		if (bp->b_xflags & BX_BKGRDINPROG)
			panic("bwrite: still writing");
	}

	/* Mark the buffer clean */
	bundirty(bp);

	/*
	 * If this buffer is marked for background writing and we
	 * do not have to wait for it, make a copy and write the
	 * copy so as to leave this buffer ready for further use.
	 *
	 * This optimization eats a lot of memory.  If we have a page
	 * or buffer shortfall we can't do it.
	 */
	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
	    (bp->b_flags & B_ASYNC) &&
	    !vm_page_count_severe() &&
	    !buf_dirty_count_severe()) {
		if (bp->b_iodone != NULL) {
			printf("bp->b_iodone = %p\n", bp->b_iodone);
			panic("bwrite: need chained iodone");
		}

		/* get a new block */
		newbp = geteblk(bp->b_bufsize);

		/* set it to be identical to the old block */
		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
		bgetvp(bp->b_vp, newbp);
		newbp->b_lblkno = bp->b_lblkno;
		newbp->b_blkno = bp->b_blkno;
		newbp->b_offset = bp->b_offset;
		newbp->b_iodone = vfs_backgroundwritedone;
		newbp->b_flags |= B_ASYNC;
		newbp->b_flags &= ~B_INVAL;

		/* move over the dependencies */
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_movedeps(bp, newbp);

		/*
		 * Initiate write on the copy, release the original to
		 * the B_LOCKED queue so that it cannot go away until
		 * the background write completes. If not locked it could go
		 * away and then be reconstituted while it was being written.
		 * If the reconstituted buffer were written, we could end up
		 * with two background copies being written at the same time.
		 */
		bp->b_xflags |= BX_BKGRDINPROG;
		bp->b_flags |= B_LOCKED;
		bqrelse(bp);
		bp = newbp;
	}

	bp->b_flags &= ~B_DONE;
	bp->b_ioflags &= ~BIO_ERROR;
	bp->b_flags |= B_WRITEINPROG | B_CACHE;
	bp->b_iocmd = BIO_WRITE;

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);

	/*
	 * Normal bwrites pipeline writes
	 */
	bp->b_runningbufspace = bp->b_bufsize;
	runningbufspace += bp->b_runningbufspace;

	if (curthread != PCPU_GET(idlethread))
		curthread->td_proc->p_stats->p_ru.ru_oublock++;
	splx(s);
	if (oldflags & B_ASYNC)
		BUF_KERNPROC(bp);
	BUF_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = bufwait(bp);
		brelse(bp);
		return (rtval);
	} else if ((oldflags & B_NOWDRAIN) == 0) {
		/*
		 * don't allow the async write to saturate the I/O
		 * system.  Deadlocks can occur only if a device strategy
		 * routine (like in MD) turns around and issues another
		 * high-level write, in which case B_NOWDRAIN is expected
		 * to be set.  Otherwise we will not deadlock here because
		 * we are blocking waiting for I/O that is already in-progress
		 * to complete.
		 */
		waitrunningbufspace();
	}

	return (0);
}

/*
 * Complete a background write started from bwrite.
 */
static void
vfs_backgroundwritedone(bp)
	struct buf *bp;
{
	struct buf *origbp;

	/*
	 * Find the original buffer that we are writing.
	 */
	if ((origbp = gbincore(bp->b_vp, bp->b_lblkno)) == NULL)
		panic("backgroundwritedone: lost buffer");
	/*
	 * Process dependencies then return any unfinished ones.
	 */
	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_complete(bp);
	if (LIST_FIRST(&bp->b_dep) != NULL)
		buf_movedeps(bp, origbp);
	/*
	 * Clear the BX_BKGRDINPROG flag in the original buffer
	 * and awaken it if it is waiting for the write to complete.
	 * If BX_BKGRDINPROG is not set in the original buffer it must
	 * have been released and re-instantiated - which is not legal.
	 */
	KASSERT((origbp->b_xflags & BX_BKGRDINPROG),
	    ("backgroundwritedone: lost buffer2"));
	origbp->b_xflags &= ~BX_BKGRDINPROG;
	if (origbp->b_xflags & BX_BKGRDWAIT) {
		origbp->b_xflags &= ~BX_BKGRDWAIT;
		wakeup(&origbp->b_xflags);
	}
	/*
	 * Clear the B_LOCKED flag and remove it from the locked
	 * queue if it currently resides there.
	 */
	origbp->b_flags &= ~B_LOCKED;
	if (BUF_LOCK(origbp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
		bremfree(origbp);
		bqrelse(origbp);
	}
	/*
	 * This buffer is marked B_NOCACHE, so when it is released
	 * by biodone, it will be tossed. We mark it with BIO_READ
	 * to avoid biodone doing a second vwakeup.
	 */
	bp->b_flags |= B_NOCACHE;
	bp->b_iocmd = BIO_READ;
	bp->b_flags &= ~(B_CACHE | B_DONE);
	bp->b_iodone = 0;
	bufdone(bp);
}

/*
 * Delayed write. (Buffer is marked dirty).  Do not bother writing
 * anything if the buffer is marked invalid.
 *
 * Note that since the buffer must be completely valid, we can safely
 * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
 * biodone() in order to prevent getblk from writing the buffer
 * out synchronously.
 */
void
bdwrite(struct buf * bp)
{
	GIANT_REQUIRED;

	if (BUF_REFCNT(bp) == 0)
		panic("bdwrite: buffer is not busy");

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	bdirty(bp);

	/*
	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
	 * true even of NFS now.
	 */
	bp->b_flags |= B_CACHE;

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also that if the pageout
	 * daemon is requesting a sync there might not be enough memory
	 * to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	/*
	 * Wakeup the buffer flushing daemon if we have a lot of dirty
	 * buffers (midpoint between our recovery point and our stall
	 * point).
	 */
	bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);

	/*
	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
	 * due to the softdep code.
	 */
}

/*
 *	bdirty:
 *
 *	Turn buffer into delayed write request.  We must clear BIO_READ and
 *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
 *	itself to properly update it in the dirty/clean lists.  We mark it
 *	B_DONE to ensure that any asynchronization of the buffer properly
 *	clears B_DONE ( else a panic will occur later ).
 *
 *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
 *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
 *	should only be called if the buffer is known-good.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	Must be called at splbio().
 *	The buffer must be on QUEUE_NONE.
 */
void
bdirty(bp)
	struct buf *bp;
{
	KASSERT(bp->b_qindex == QUEUE_NONE,
	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
	bp->b_flags &= ~(B_RELBUF);
	bp->b_iocmd = BIO_WRITE;

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
		bd_wakeup((lodirtybuffers + hidirtybuffers) / 2);
	}
}

/*
 *	bundirty:
 *
 *	Clear B_DELWRI for buffer.
 *
 *	Since the buffer is not on a queue, we do not update the numfreebuffers
 *	count.
 *
 *	Must be called at splbio().
 *	The buffer must be on QUEUE_NONE.
 */

void
bundirty(bp)
	struct buf *bp;
{
	KASSERT(bp->b_qindex == QUEUE_NONE,
	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));

	if (bp->b_flags & B_DELWRI) {
		bp->b_flags &= ~B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		--numdirtybuffers;
		numdirtywakeup(lodirtybuffers);
	}
	/*
	 * Since it is now being written, we can clear its deferred write flag.
	 */
	bp->b_flags &= ~B_DEFERRED;
}

/*
 *	bawrite:
 *
 *	Asynchronous write.  Start output on a buffer, but do not wait for
 *	it to complete.  The buffer is released when the output completes.
 *
 *	bwrite() ( or the VOP routine anyway ) is responsible for handling
 *	B_INVAL buffers.  Not us.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) BUF_WRITE(bp);
}

/*
 *	bwillwrite:
 *
 *	Called prior to the locking of any vnodes when we are expecting to
 *	write.  We do not want to starve the buffer cache with too many
 *	dirty buffers so we block here.  By blocking prior to the locking
 *	of any vnodes we attempt to avoid the situation where a locked vnode
 *	prevents the various system daemons from flushing related buffers.
 */

void
bwillwrite(void)
{
	if (numdirtybuffers >= hidirtybuffers) {
		int s;

		mtx_lock(&Giant);
		s = splbio();
		while (numdirtybuffers >= hidirtybuffers) {
			bd_wakeup(1);
			needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
			tsleep(&needsbuffer, (PRIBIO + 4), "flswai", 0);
		}
		splx(s);
		mtx_unlock(&Giant);
	}
}
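
/*
 * Editorial note: bwillwrite() is expected to be invoked from write paths
 * (vn_write() is the canonical caller) before any vnode lock is taken, so
 * the throttle above cannot deadlock against the daemons that flush the
 * very buffers being waited on.
 */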

/*
 * Return true if we have too many dirty buffers.
 */
int
buf_dirty_count_severe(void)
{
	return(numdirtybuffers >= hidirtybuffers);
}

/*
 *	brelse:
 *
 *	Release a busy buffer and, if requested, free its resources.  The
 *	buffer will be stashed in the appropriate bufqueue[] allowing it
 *	to be accessed later as a cache entity or reused for other purposes.
 */
void
brelse(struct buf * bp)
{
	int s;

	GIANT_REQUIRED;

	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	s = splbio();

	if (bp->b_flags & B_LOCKED)
		bp->b_ioflags &= ~BIO_ERROR;

	if (bp->b_iocmd == BIO_WRITE &&
	    (bp->b_ioflags & BIO_ERROR) &&
	    !(bp->b_flags & B_INVAL)) {
		/*
		 * Failed write, redirty.  Must clear BIO_ERROR to prevent
		 * pages from being scrapped.  If B_INVAL is set then
		 * this case is not run and the next case is run to
		 * destroy the buffer.  B_INVAL can occur if the buffer
		 * is outside the range supported by the underlying device.
		 */
		bp->b_ioflags &= ~BIO_ERROR;
		bdirty(bp);
	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
	    (bp->b_ioflags & BIO_ERROR) ||
	    bp->b_iocmd == BIO_DELETE || (bp->b_bufsize <= 0)) {
		/*
		 * Either a failed I/O or we were asked to free or not
		 * cache the buffer.
		 */
		bp->b_flags |= B_INVAL;
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);
		if (bp->b_flags & B_DELWRI) {
			--numdirtybuffers;
			numdirtywakeup(lodirtybuffers);
		}
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if ((bp->b_flags & B_VMIO) == 0) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			if (bp->b_vp)
				brelvp(bp);
		}
	}

	/*
	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
	 * is called with B_DELWRI set, the underlying pages may wind up
	 * getting freed causing a previous write (bdwrite()) to get 'lost'
	 * because pages associated with a B_DELWRI bp are marked clean.
	 *
	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
	 * if B_DELWRI is set.
	 *
	 * If B_DELWRI is not set we may have to set B_RELBUF if we are low
	 * on pages to return pages to the VM page queues.
	 */
	if (bp->b_flags & B_DELWRI)
		bp->b_flags &= ~B_RELBUF;
	else if (vm_page_count_severe() && !(bp->b_xflags & BX_BKGRDINPROG))
		bp->b_flags |= B_RELBUF;

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, not even NFS buffers now.  Two flags affect this.  If
	 * B_INVAL, the struct buf is invalidated but the VM object is kept
	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
	 *
	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
	 * buffer is also B_INVAL because it hits the re-dirtying code above.
	 *
	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
	 * the commit state and we cannot afford to lose the buffer. If the
	 * buffer has a background write in progress, we need to keep it
	 * around to prevent it from being reconstituted and starting a second
	 * background write.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 !vn_isdisk(bp->b_vp, NULL) &&
		 (bp->b_flags & B_DELWRI))
	    ) {

		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;
		struct vnode *vp;

		vp = bp->b_vp;

		/*
		 * Get the base offset and length of the buffer.  Note that
		 * in the VMIO case if the buffer block size is not
		 * page-aligned then b_data pointer may not be page-aligned.
		 * But our b_pages[] array *IS* page aligned.
		 *
		 * block sizes less than DEV_BSIZE (usually 512) are not
		 * supported due to the page granularity bits (m->valid,
		 * m->dirty, etc...).
		 *
		 * See man buf(9) for more information
		 */
		resid = bp->b_bufsize;
		foff = bp->b_offset;

		for (i = 0; i < bp->b_npages; i++) {
			int had_bogus = 0;

			m = bp->b_pages[i];
			vm_page_flag_clear(m, PG_ZERO);

			/*
			 * If we hit a bogus page, fixup *all* the bogus pages
			 * now.
			 */
			if (m == bogus_page) {
				VOP_GETVOBJECT(vp, &obj);
				poff = OFF_TO_IDX(bp->b_offset);
				had_bogus = 1;

				for (j = i; j < bp->b_npages; j++) {
					vm_page_t mtmp;
					mtmp = bp->b_pages[j];
					if (mtmp == bogus_page) {
						mtmp = vm_page_lookup(obj, poff + j);
						if (!mtmp) {
							panic("brelse: page missing\n");
						}
						bp->b_pages[j] = mtmp;
					}
				}

				if ((bp->b_flags & B_INVAL) == 0) {
					pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
				}
				m = bp->b_pages[i];
			}
			if ((bp->b_flags & B_NOCACHE) || (bp->b_ioflags & BIO_ERROR)) {
				int poffset = foff & PAGE_MASK;
				int presid = resid > (PAGE_SIZE - poffset) ?
					(PAGE_SIZE - poffset) : resid;

				KASSERT(presid >= 0, ("brelse: extra page"));
				vm_page_set_invalid(m, poffset, presid);
				if (had_bogus)
					printf("avoided corruption bug in bogus_page/brelse code\n");
			}
			resid -= PAGE_SIZE - (foff & PAGE_MASK);
			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
		}

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	} else if (bp->b_flags & B_VMIO) {

		if (bp->b_flags & (B_INVAL | B_RELBUF)) {
			vfs_vmio_release(bp);
		}

	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");
	if (BUF_REFCNT(bp) > 1) {
		/* do not release to free list */
		BUF_UNLOCK(bp);
		splx(s);
		return;
	}

	/* enqueue */

	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_xflags &= ~BX_BKGRDWRITE;
		if (bp->b_xflags & BX_BKGRDINPROG)
			panic("losing buffer 1");
		if (bp->b_kvasize) {
			bp->b_qindex = QUEUE_EMPTYKVA;
		} else {
			bp->b_qindex = QUEUE_EMPTY;
		}
		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
	    (bp->b_ioflags & BIO_ERROR)) {
		bp->b_flags |= B_INVAL;
		bp->b_xflags &= ~BX_BKGRDWRITE;
		if (bp->b_xflags & BX_BKGRDINPROG)
			panic("losing buffer 2");
		bp->b_qindex = QUEUE_CLEAN;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;

	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

	/* remaining buffers */
	} else {
		if (bp->b_flags & B_DELWRI)
			bp->b_qindex = QUEUE_DIRTY;
		else
			bp->b_qindex = QUEUE_CLEAN;
		if (bp->b_flags & B_AGE)
			TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
		else
			TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
	}

	/*
	 * If B_INVAL, clear B_DELWRI.  We've already placed the buffer
	 * on the correct queue.
	 */
	if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI))
		bundirty(bp);

	/*
	 * Fixup numfreebuffers count.  The bp is on an appropriate queue
	 * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
	 * if B_INVAL is set ).
	 */

	if ((bp->b_flags & B_LOCKED) == 0 && !(bp->b_flags & B_DELWRI))
		bufcountwakeup();

	/*
	 * Something we can maybe free or reuse
	 */
	if (bp->b_bufsize || bp->b_kvasize)
		bufspacewakeup();

	/* unlock */
	BUF_UNLOCK(bp);
	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF |
			B_DIRECT | B_NOWDRAIN);
	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
		panic("brelse: not dirty");
	splx(s);
}

/*
 * Release a buffer back to the appropriate queue but do not try to free
 * it.  The buffer is expected to be used again soon.
 *
 * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
 * biodone() to requeue an async I/O on completion.  It is also used when
 * known good buffers need to be requeued but we think we may need the data
 * again soon.
 *
 * XXX we should be able to leave the B_RELBUF hint set on completion.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
	if (BUF_REFCNT(bp) > 1) {
		/* do not release to free list */
		BUF_UNLOCK(bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_LOCKED) {
		bp->b_ioflags &= ~BIO_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_DELWRI) {
		bp->b_qindex = QUEUE_DIRTY;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY], bp, b_freelist);
	} else if (vm_page_count_severe()) {
		/*
		 * We are too low on memory, we have to try to free the
		 * buffer (most importantly: the wired pages making up its
		 * backing store) *now*.
		 */
		splx(s);
		brelse(bp);
		return;
	} else {
		bp->b_qindex = QUEUE_CLEAN;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
	}

	if ((bp->b_flags & B_LOCKED) == 0 &&
	    ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))) {
		bufcountwakeup();
	}

	/*
	 * Something we can maybe free or reuse.
	 */
	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
		bufspacewakeup();

	/* unlock */
	BUF_UNLOCK(bp);
	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
		panic("bqrelse: not dirty");
	splx(s);
}

/* Give pages used by the bp back to the VM system (where possible) */
static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	GIANT_REQUIRED;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		/*
		 * In order to keep page LRU ordering consistent, put
		 * everything on the inactive queue.
		 */
		vm_page_unwire(m, 0);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {
			vm_page_flag_clear(m, PG_ZERO);
			/*
			 * Might as well free the page if we can and it has
			 * no valid data.  We also free the page if the
			 * buffer was used for direct I/O
			 */
			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
			    m->hold_count == 0) {
				vm_page_busy(m);
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			} else if (bp->b_flags & B_DIRECT) {
				vm_page_try_to_free(m);
			} else if (vm_page_count_severe()) {
				vm_page_try_to_cache(m);
			}
		}
	}
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);

	if (bp->b_bufsize) {
		bufspacewakeup();
		bp->b_bufsize = 0;
	}
	bp->b_npages = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = bufhash(vp, blkno);

	/* Search hash chain */
	LIST_FOREACH(bp, bh, b_hash) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
	}
	return (bp);
}

/*
 *	vfs_bio_awrite:
 *
 *	Implement clustered async writes for clearing out B_DELWRI buffers.
 *	This is much better than the old way of writing only one buffer at
 *	a time.  Note that we may not be presented with the buffers in the
 *	correct order, so we search for the cluster in both directions.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	int j;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;
	int size;
	int maxcl;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files.  If
	 * we find a clusterable block we could be in the middle of a cluster
	 * rather than at the beginning.
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    BUF_REFCNT(bpa) == 0 &&
			    ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno !=
				     bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		for (j = 1; i + j <= maxcl && j <= lblkno; j++) {
			if ((bpa = gbincore(vp, lblkno - j)) &&
			    BUF_REFCNT(bpa) == 0 &&
			    ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno !=
				     bp->b_blkno - ((j * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		--j;
		ncl = i + j;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
			splx(s);
			return nwritten;
		}
	}

	BUF_LOCK(bp, LK_EXCLUSIVE);
	bremfree(bp);
	bp->b_flags |= B_ASYNC;

	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 *
	 * XXX returns b_bufsize instead of b_bcount for nwritten?
	 */
	nwritten = bp->b_bufsize;
	(void) BUF_WRITE(bp);

	return nwritten;
}
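
/*
 * Illustrative example: with MAXPHYS of 128K and an 8K f_iosize, maxcl is
 * 16, so the scans above can extend the cluster up to 15 blocks in each
 * direction (bounded so i + j never exceeds maxcl) before falling back to
 * the single-buffer write.
 */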

/*
 *	getnewbuf:
 *
 *	Find and initialize a new buffer header, freeing up existing buffers
 *	in the bufqueues as necessary.  The new buffer is returned locked.
 *
 *	Important:  B_INVAL is not set.  If the caller wishes to throw the
 *	buffer away, the caller must set B_INVAL prior to calling brelse().
 *
 *	We block if:
 *		We have insufficient buffer headers
 *		We have insufficient buffer space
 *		buffer_map is too fragmented ( space reservation fails )
 *		If we have to flush dirty buffers ( but we try to avoid this )
 *
 *	To avoid VFS layer recursion we do not flush dirty buffers ourselves.
 *	Instead we ask the buf daemon to do it for us.  We attempt to
 *	avoid piecemeal wakeups of the pageout daemon.
 */

static struct buf *
getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp;
	struct buf *nbp;
	int defrag = 0;
	int nqindex;
	static int flushingbufs;

	GIANT_REQUIRED;

	/*
	 * We can't afford to block since we might be holding a vnode lock,
	 * which may prevent system daemons from running.  We deal with
	 * low-memory situations by proactively returning memory and running
	 * async I/O rather than sync I/O.
	 */

	++getnewbufcalls;
	--getnewbufrestarts;
restart:
	++getnewbufrestarts;

	/*
	 * Set up for scan.  If we do not have enough free buffers,
	 * we set up a degenerate case that immediately fails.  Note
	 * that if we are a specially marked process, we are allowed to
	 * dip into our reserves.
	 *
	 * The scanning sequence is nominally:  EMPTY->EMPTYKVA->CLEAN
	 *
	 * We start with EMPTYKVA.  If the list is empty we backup to EMPTY.
	 * However, there are a number of cases (defragging, reusing, ...)
	 * where we cannot backup.
	 */
	nqindex = QUEUE_EMPTYKVA;
	nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);

	if (nbp == NULL) {
		/*
		 * If no EMPTYKVA buffers and we are either
		 * defragging or reusing, locate a CLEAN buffer
		 * to free or reuse.  If bufspace usage is low
		 * skip this step so we can allocate a new buffer.
		 */
		if (defrag || bufspace >= lobufspace) {
			nqindex = QUEUE_CLEAN;
			nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
		}

		/*
		 * If we could not find or were not allowed to reuse a
		 * CLEAN buffer, check to see if it is ok to use an EMPTY
		 * buffer.  We can only use an EMPTY buffer if allocating
		 * its KVA would not otherwise run us out of buffer space.
		 */
		if (nbp == NULL && defrag == 0 &&
		    bufspace + maxsize < hibufspace) {
			nqindex = QUEUE_EMPTY;
			nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
		}
	}

	/*
	 * Run scan, possibly freeing data and/or kva mappings on the fly
	 * depending.
	 */

	while ((bp = nbp) != NULL) {
		int qindex = nqindex;

		/*
		 * Calculate next bp ( we can only use it if we do not block
		 * or do other fancy things ).
		 */
		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
			switch(qindex) {
			case QUEUE_EMPTY:
				nqindex = QUEUE_EMPTYKVA;
				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA])))
					break;
				/* fall through */
			case QUEUE_EMPTYKVA:
				nqindex = QUEUE_CLEAN;
				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN])))
					break;
				/* fall through */
			case QUEUE_CLEAN:
				/*
				 * nbp is NULL.
				 */
				break;
			}
		}

		/*
		 * Sanity Checks
		 */
		KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistent queue %d bp %p", qindex, bp));

		/*
		 * Note: we no longer distinguish between VMIO and non-VMIO
		 * buffers.
		 */

		KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));

		/*
		 * If we are defragging then we need a buffer with
		 * b_kvasize != 0.  XXX this situation should no longer
		 * occur, if defrag is non-zero the buffer's b_kvasize
		 * should also be non-zero at this point.  XXX
		 */
		if (defrag && bp->b_kvasize == 0) {
			printf("Warning: defrag empty buffer %p\n", bp);
			continue;
		}

		/*
		 * Start freeing the bp.  This is somewhat involved.  nbp
		 * remains valid only for QUEUE_EMPTY[KVA] bp's.
		 */

		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
			panic("getnewbuf: locked buf");
		bremfree(bp);

		if (qindex == QUEUE_CLEAN) {
			if (bp->b_flags & B_VMIO) {
				bp->b_flags &= ~B_ASYNC;
				vfs_vmio_release(bp);
			}
			if (bp->b_vp)
				brelvp(bp);
		}

		/*
		 * NOTE:  nbp is now entirely invalid.  We can only restart
		 * the scan from this point on.
		 *
		 * Get the rest of the buffer freed up.  b_kva* is still
		 * valid after this operation.
		 */

		if (bp->b_rcred != NOCRED) {
			crfree(bp->b_rcred);
			bp->b_rcred = NOCRED;
		}
		if (bp->b_wcred != NOCRED) {
			crfree(bp->b_wcred);
			bp->b_wcred = NOCRED;
		}
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);
		if (bp->b_xflags & BX_BKGRDINPROG)
			panic("losing buffer 3");
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);

		if (bp->b_bufsize)
			allocbuf(bp, 0);

		bp->b_flags = 0;
		bp->b_ioflags = 0;
		bp->b_xflags = 0;
		bp->b_dev = NODEV;
		bp->b_vp = NULL;
		bp->b_blkno = bp->b_lblkno = 0;
		bp->b_offset = NOOFFSET;
		bp->b_iodone = 0;
		bp->b_error = 0;
		bp->b_resid = 0;
		bp->b_bcount = 0;
		bp->b_npages = 0;
		bp->b_dirtyoff = bp->b_dirtyend = 0;
		bp->b_magic = B_MAGIC_BIO;
		bp->b_op = &buf_ops_bio;

		LIST_INIT(&bp->b_dep);

		/*
		 * If we are defragging then free the buffer.
		 */
		if (defrag) {
			bp->b_flags |= B_INVAL;
			bfreekva(bp);
			brelse(bp);
			defrag = 0;
			goto restart;
		}
1818 
1819 		/*
1820 		 * If we are overcommitted then recover the buffer and its
1821 		 * KVM space.  This occurs in rare situations when multiple
1822 		 * processes are blocked in getnewbuf() or allocbuf().
1823 		 */
1824 		if (bufspace >= hibufspace)
1825 			flushingbufs = 1;
1826 		if (flushingbufs && bp->b_kvasize != 0) {
1827 			bp->b_flags |= B_INVAL;
1828 			bfreekva(bp);
1829 			brelse(bp);
1830 			goto restart;
1831 		}
1832 		if (bufspace < lobufspace)
1833 			flushingbufs = 0;
1834 		break;
1835 	}
1836 
1837 	/*
1838 	 * If we exhausted our list, sleep as appropriate.  We may have to
1839 	 * wakeup various daemons and write out some dirty buffers.
1840 	 *
1841 	 * Generally we are sleeping due to insufficient buffer space.
1842 	 */
1843 
1844 	if (bp == NULL) {
1845 		int flags;
1846 		char *waitmsg;
1847 
1848 		if (defrag) {
1849 			flags = VFS_BIO_NEED_BUFSPACE;
1850 			waitmsg = "nbufkv";
1851 		} else if (bufspace >= hibufspace) {
1852 			waitmsg = "nbufbs";
1853 			flags = VFS_BIO_NEED_BUFSPACE;
1854 		} else {
1855 			waitmsg = "newbuf";
1856 			flags = VFS_BIO_NEED_ANY;
1857 		}
1858 
1859 		bd_speedup();	/* heeeelp */
1860 
1861 		needsbuffer |= flags;
1862 		while (needsbuffer & flags) {
1863 			if (tsleep(&needsbuffer, (PRIBIO + 4) | slpflag,
1864 			    waitmsg, slptimeo))
1865 				return (NULL);
1866 		}
1867 	} else {
1868 		/*
1869 		 * We finally have a valid bp.  We aren't quite out of the
1870 		 * woods, we still have to reserve kva space.  In order
1871 		 * to keep fragmentation sane we only allocate kva in
1872 		 * BKVASIZE chunks.
1873 		 */
1874 		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
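		/*
		 * Illustrative arithmetic, assuming BKVASIZE is 16384 so
		 * that BKVAMASK is 16383: a maxsize of 9000 rounds up to
		 * 16384, and 16385 rounds up to 32768.
		 */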
1875 
1876 		if (maxsize != bp->b_kvasize) {
1877 			vm_offset_t addr = 0;
1878 
1879 			bfreekva(bp);
1880 
1881 			if (vm_map_findspace(buffer_map,
1882 				vm_map_min(buffer_map), maxsize, &addr)) {
1883 				/*
1884 				 * Uh oh.  Buffer map is too fragmented.  We
1885 				 * must defragment the map.
1886 				 */
1887 				++bufdefragcnt;
1888 				defrag = 1;
1889 				bp->b_flags |= B_INVAL;
1890 				brelse(bp);
1891 				goto restart;
1892 			}
1893 			if (addr) {
1894 				vm_map_insert(buffer_map, NULL, 0,
1895 					addr, addr + maxsize,
1896 					VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
1897 
1898 				bp->b_kvabase = (caddr_t) addr;
1899 				bp->b_kvasize = maxsize;
1900 				bufspace += bp->b_kvasize;
1901 				++bufreusecnt;
1902 			}
1903 		}
1904 		bp->b_data = bp->b_kvabase;
1905 	}
1906 	return (bp);
1907 }
1908 
1909 /*
1910  *	buf_daemon:
1911  *
1912  *	buffer flushing daemon.  Buffers are normally flushed by the
1913  *	update daemon but if it cannot keep up this process starts to
1914  *	take the load in an attempt to prevent getnewbuf() from blocking.
1915  */
1916 
1917 static struct proc *bufdaemonproc;
1918 
1919 static struct kproc_desc buf_kp = {
1920 	"bufdaemon",
1921 	buf_daemon,
1922 	&bufdaemonproc
1923 };
1924 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp)
1925 
1926 static void
1927 buf_daemon()
1928 {
1929 	int s;
1930 
1931 	mtx_lock(&Giant);
1932 
1933 	/*
1934 	 * This process needs to be suspended prior to shutdown sync.
1935 	 */
1936 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, bufdaemonproc,
1937 	    SHUTDOWN_PRI_LAST);
1938 
1939 	/*
1940 	 * This process is allowed to take the buffer cache to the limit
1941 	 */
1942 	s = splbio();
1943 
1944 	for (;;) {
1945 		kthread_suspend_check(bufdaemonproc);
1946 
1947 		bd_request = 0;
1948 
1949 		/*
1950 		 * Do the flush.  Limit the amount of in-transit I/O we
1951 		 * allow to build up, otherwise we would completely saturate
1952 		 * the I/O system.  Wakeup any waiting processes before we
1953 		 * normally would so they can run in parallel with our drain.
1954 		 */
1955 		while (numdirtybuffers > lodirtybuffers) {
1956 			if (flushbufqueues() == 0)
1957 				break;
1958 			waitrunningbufspace();
1959 			numdirtywakeup((lodirtybuffers + hidirtybuffers) / 2);
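			/*
			 * e.g. with lodirtybuffers 64 and hidirtybuffers 256
			 * (illustrative values only), waiters are released
			 * once numdirtybuffers drains below 160 rather than
			 * all the way down to the low water mark.
			 */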
1960 		}
1961 
1962 		/*
1963 		 * Only clear bd_request if we have reached our low water
1964 		 * mark.  The buf_daemon normally waits 1 second and
1965 		 * then incrementally flushes any dirty buffers that have
1966 		 * built up, within reason.
1967 		 *
1968 		 * If we were unable to hit our low water mark and couldn't
1969 		 * find any flushable buffers, we sleep half a second.
1970 		 * Otherwise we loop immediately.
1971 		 */
1972 		if (numdirtybuffers <= lodirtybuffers) {
1973 			/*
1974 			 * We reached our low water mark, reset the
1975 			 * request and sleep until we are needed again.
1976 			 * The sleep is just so the suspend code works.
1977 			 */
1978 			bd_request = 0;
1979 			tsleep(&bd_request, PVM, "psleep", hz);
1980 		} else {
1981 			/*
1982 			 * We couldn't find any flushable dirty buffers but
1983 			 * still have too many dirty buffers, so we
1984 			 * have to sleep and try again.  (rare)
1985 			 */
1986 			tsleep(&bd_request, PVM, "qsleep", hz / 2);
1987 		}
1988 	}
1989 }
1990 
1991 /*
1992  *	flushbufqueues:
1993  *
1994  *	Try to flush a buffer in the dirty queue.  We must be careful to
1995  *	free up B_INVAL buffers instead of writing them, which NFS is
1996  *	particularly sensitive to.
1997  */
1998 
1999 static int
2000 flushbufqueues(void)
2001 {
2002 	struct buf *bp;
2003 	int r = 0;
2004 
2005 	bp = TAILQ_FIRST(&bufqueues[QUEUE_DIRTY]);
2006 
2007 	while (bp) {
2008 		KASSERT((bp->b_flags & B_DELWRI), ("unexpected clean buffer %p", bp));
2009 		if ((bp->b_flags & B_DELWRI) != 0 &&
2010 		    (bp->b_xflags & BX_BKGRDINPROG) == 0) {
2011 			if (bp->b_flags & B_INVAL) {
2012 				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
2013 					panic("flushbufqueues: locked buf");
2014 				bremfree(bp);
2015 				brelse(bp);
2016 				++r;
2017 				break;
2018 			}
2019 			if (LIST_FIRST(&bp->b_dep) != NULL &&
2020 			    (bp->b_flags & B_DEFERRED) == 0 &&
2021 			    buf_countdeps(bp, 0)) {
2022 				TAILQ_REMOVE(&bufqueues[QUEUE_DIRTY],
2023 				    bp, b_freelist);
2024 				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY],
2025 				    bp, b_freelist);
2026 				bp->b_flags |= B_DEFERRED;
2027 				bp = TAILQ_FIRST(&bufqueues[QUEUE_DIRTY]);
2028 				continue;
2029 			}
2030 			vfs_bio_awrite(bp);
2031 			++r;
2032 			break;
2033 		}
2034 		bp = TAILQ_NEXT(bp, b_freelist);
2035 	}
2036 	return (r);
2037 }
2038 
2039 /*
2040  * Check to see if a block is currently memory resident.
2041  */
2042 struct buf *
2043 incore(struct vnode * vp, daddr_t blkno)
2044 {
2045 	struct buf *bp;
2046 
2047 	int s = splbio();
2048 	bp = gbincore(vp, blkno);
2049 	splx(s);
2050 	return (bp);
2051 }
2052 
2053 /*
2054  * Returns true if no I/O is needed to access the
2055  * associated VM object.  This is like incore except
2056  * it also hunts around in the VM system for the data.
2057  */
2058 
2059 int
2060 inmem(struct vnode * vp, daddr_t blkno)
2061 {
2062 	vm_object_t obj;
2063 	vm_offset_t toff, tinc, size;
2064 	vm_page_t m;
2065 	vm_ooffset_t off;
2066 
2067 	GIANT_REQUIRED;
2068 
2069 	if (incore(vp, blkno))
2070 		return 1;
2071 	if (vp->v_mount == NULL)
2072 		return 0;
2073 	if (VOP_GETVOBJECT(vp, &obj) != 0 || (vp->v_flag & VOBJBUF) == 0)
2074 		return 0;
2075 
2076 	size = PAGE_SIZE;
2077 	if (size > vp->v_mount->mnt_stat.f_iosize)
2078 		size = vp->v_mount->mnt_stat.f_iosize;
2079 	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
2080 
2081 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
2082 		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
2083 		if (!m)
2084 			goto notinmem;
2085 		tinc = size;
2086 		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
2087 			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
2088 		if (vm_page_is_valid(m,
2089 		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
2090 			goto notinmem;
2091 	}
2092 	return 1;
2093 
2094 notinmem:
2095 	return (0);
2096 }
2097 
2098 /*
2099  *	vfs_setdirty:
2100  *
2101  *	Sets the dirty range for a buffer based on the status of the dirty
2102  *	bits in the pages comprising the buffer.
2103  *
2104  *	The range is limited to the size of the buffer.
2105  *
2106  *	This routine is primarily used by NFS, but is generalized for the
2107  *	B_VMIO case.
2108  */
2109 static void
2110 vfs_setdirty(struct buf *bp)
2111 {
2112 	int i;
2113 	vm_object_t object;
2114 
2115 	GIANT_REQUIRED;
2116 	/*
2117 	 * Degenerate case - empty buffer
2118 	 */
2119 
2120 	if (bp->b_bufsize == 0)
2121 		return;
2122 
2123 	/*
2124 	 * We qualify the scan for modified pages on whether the
2125 	 * object has been flushed yet.  The OBJ_WRITEABLE flag
2126 	 * is not cleared simply by removing write permission from the pages.
2127 	 */
2128 
2129 	if ((bp->b_flags & B_VMIO) == 0)
2130 		return;
2131 
2132 	object = bp->b_pages[0]->object;
2133 
2134 	if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
2135 		printf("Warning: object %p writeable but not mightbedirty\n", object);
2136 	if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
2137 		printf("Warning: object %p mightbedirty but not writeable\n", object);
2138 
2139 	if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
2140 		vm_offset_t boffset;
2141 		vm_offset_t eoffset;
2142 
2143 		/*
2144 		 * test the pages to see if they have been modified directly
2145 		 * by users through the VM system.
2146 		 */
2147 		for (i = 0; i < bp->b_npages; i++) {
2148 			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
2149 			vm_page_test_dirty(bp->b_pages[i]);
2150 		}
2151 
2152 		/*
2153 		 * Calculate the encompassing dirty range, boffset and eoffset,
2154 		 * (eoffset - boffset) bytes.
2155 		 */
2156 
2157 		for (i = 0; i < bp->b_npages; i++) {
2158 			if (bp->b_pages[i]->dirty)
2159 				break;
2160 		}
2161 		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
2162 
2163 		for (i = bp->b_npages - 1; i >= 0; --i) {
2164 			if (bp->b_pages[i]->dirty) {
2165 				break;
2166 			}
2167 		}
2168 		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
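		/*
		 * Worked example (assuming PAGE_SIZE 4096): if
		 * bp->b_offset & PAGE_MASK is 512 and pages 1..2 are the
		 * dirty ones, boffset = (1 << 12) - 512 = 3584 and
		 * eoffset = (3 << 12) - 512 = 11776.
		 */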
2169 
2170 		/*
2171 		 * Fit it to the buffer.
2172 		 */
2173 
2174 		if (eoffset > bp->b_bcount)
2175 			eoffset = bp->b_bcount;
2176 
2177 		/*
2178 		 * If we have a good dirty range, merge with the existing
2179 		 * dirty range.
2180 		 */
2181 
2182 		if (boffset < eoffset) {
2183 			if (bp->b_dirtyoff > boffset)
2184 				bp->b_dirtyoff = boffset;
2185 			if (bp->b_dirtyend < eoffset)
2186 				bp->b_dirtyend = eoffset;
2187 		}
2188 	}
2189 }
2190 
2191 /*
2192  *	getblk:
2193  *
2194  *	Get a block given a specified block and offset into a file/device.
2195  *	The buffer's B_DONE bit will be cleared on return, making it almost
2196  * 	ready for an I/O initiation.  B_INVAL may or may not be set on
2197  *	return.  The caller should clear B_INVAL prior to initiating a
2198  *	READ.
2199  *
2200  *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
2201  *	an existing buffer.
2202  *
2203  *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
2204  *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
2205  *	and then cleared based on the backing VM.  If the previous buffer is
2206  *	non-0-sized but invalid, B_CACHE will be cleared.
2207  *
2208  *	If getblk() must create a new buffer, the new buffer is returned with
2209  *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
2210  *	case it is returned with B_INVAL clear and B_CACHE set based on the
2211  *	backing VM.
2212  *
2213  *	getblk() also forces a BUF_WRITE() for any B_DELWRI buffer whose
2214  *	B_CACHE bit is clear.
2215  *
2216  *	What this means, basically, is that the caller should use B_CACHE to
2217  *	determine whether the buffer is fully valid or not and should clear
2218  *	B_INVAL prior to issuing a read.  If the caller intends to validate
2219  *	the buffer by loading its data area with something, the caller needs
2220  *	to clear B_INVAL.  If the caller does this without issuing an I/O,
2221  *	the caller should set B_CACHE ( as an optimization ), else the caller
2222  *	should issue the I/O and biodone() will set B_CACHE if the I/O was
2223  *	a write attempt or if it was a successful read.  If the caller
2224  *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
2225  *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
2226  */
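/*
 * A minimal sketch of the caller pattern described above (illustrative
 * only, modelled loosely on a bread()-style reader; credential and error
 * handling omitted):
 *
 *	bp = getblk(vp, blkno, size, 0, 0);
 *	if ((bp->b_flags & B_CACHE) == 0) {
 *		bp->b_iocmd = BIO_READ;
 *		bp->b_flags &= ~B_INVAL;
 *		bp->b_ioflags &= ~BIO_ERROR;
 *		vfs_busy_pages(bp, 0);
 *		VOP_STRATEGY(vp, bp);
 *		error = bufwait(bp);
 *	}
 */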
2227 struct buf *
2228 getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
2229 {
2230 	struct buf *bp;
2231 	int s;
2232 	struct bufhashhdr *bh;
2233 
2234 	if (size > MAXBSIZE)
2235 		panic("getblk: size(%d) > MAXBSIZE(%d)", size, MAXBSIZE);
2236 
2237 	s = splbio();
2238 loop:
2239 	/*
2240 	 * Block if we are low on buffers.  Certain processes are allowed
2241 	 * to completely exhaust the buffer cache.
2242 	 *
2243 	 * If this check ever becomes a bottleneck it may be better to
2244 	 * move it into the else, when gbincore() fails.  At the moment
2245 	 * it isn't a problem.
2246 	 *
2247 	 * XXX remove if 0 sections (clean this up after it's proven)
2248 	 */
2249 	if (numfreebuffers == 0) {
2250 		if (curthread == PCPU_GET(idlethread))
2251 			return NULL;
2252 		needsbuffer |= VFS_BIO_NEED_ANY;
2253 	}
2254 
2255 	if ((bp = gbincore(vp, blkno))) {
2256 		/*
2257 		 * Buffer is in-core.  If the buffer is not busy, it must
2258 		 * be on a queue.
2259 		 */
2260 
2261 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
2262 			if (BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
2263 			    "getblk", slpflag, slptimeo) == ENOLCK)
2264 				goto loop;
2265 			splx(s);
2266 			return (struct buf *) NULL;
2267 		}
2268 
2269 		/*
2270 		 * The buffer is locked.  B_CACHE is cleared if the buffer is
2271 		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
2272 		 * and for a VMIO buffer B_CACHE is adjusted according to the
2273 		 * backing VM cache.
2274 		 */
2275 		if (bp->b_flags & B_INVAL)
2276 			bp->b_flags &= ~B_CACHE;
2277 		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
2278 			bp->b_flags |= B_CACHE;
2279 		bremfree(bp);
2280 
2281 		/*
2282 		 * check for size inconsistencies for the non-VMIO case.
2283 		 */
2284 
2285 		if (bp->b_bcount != size) {
2286 			if ((bp->b_flags & B_VMIO) == 0 ||
2287 			    (size > bp->b_kvasize)) {
2288 				if (bp->b_flags & B_DELWRI) {
2289 					bp->b_flags |= B_NOCACHE;
2290 					BUF_WRITE(bp);
2291 				} else {
2292 					if ((bp->b_flags & B_VMIO) &&
2293 					   (LIST_FIRST(&bp->b_dep) == NULL)) {
2294 						bp->b_flags |= B_RELBUF;
2295 						brelse(bp);
2296 					} else {
2297 						bp->b_flags |= B_NOCACHE;
2298 						BUF_WRITE(bp);
2299 					}
2300 				}
2301 				goto loop;
2302 			}
2303 		}
2304 
2305 		/*
2306 		 * If the size is inconsistent in the VMIO case, we can resize
2307 		 * the buffer.  This might lead to B_CACHE getting set or
2308 		 * cleared.  If the size has not changed, B_CACHE remains
2309 		 * unchanged from its previous state.
2310 		 */
2311 
2312 		if (bp->b_bcount != size)
2313 			allocbuf(bp, size);
2314 
2315 		KASSERT(bp->b_offset != NOOFFSET,
2316 		    ("getblk: no buffer offset"));
2317 
2318 		/*
2319 		 * A buffer with B_DELWRI set and B_CACHE clear must
2320 		 * be committed before we can return the buffer in
2321 		 * order to prevent the caller from issuing a read
2322 		 * ( due to B_CACHE not being set ) and overwriting
2323 		 * it.
2324 		 *
2325 		 * Most callers, including NFS and FFS, need this to
2326 		 * operate properly either because they assume they
2327 		 * can issue a read if B_CACHE is not set, or because
2328 		 * ( for example ) an uncached B_DELWRI might loop due
2329 		 * to softupdates re-dirtying the buffer.  In the latter
2330 		 * case, B_CACHE is set after the first write completes,
2331 		 * preventing further loops.
2332 		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
2333 		 * above while extending the buffer, we cannot allow the
2334 		 * buffer to remain with B_CACHE set after the write
2335 		 * completes or it will represent a corrupt state.  To
2336 		 * deal with this we set B_NOCACHE to scrap the buffer
2337 		 * after the write.
2338 		 *
2339 		 * We might be able to do something fancy, like setting
2340 		 * B_CACHE in bwrite() except if B_DELWRI is already set,
2341 		 * so the below call doesn't set B_CACHE, but that gets really
2342 		 * confusing.  This is much easier.
2343 		 */
2344 
2345 		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
2346 			bp->b_flags |= B_NOCACHE;
2347 			BUF_WRITE(bp);
2348 			goto loop;
2349 		}
2350 
2351 		splx(s);
2352 		bp->b_flags &= ~B_DONE;
2353 	} else {
2354 		/*
2355 		 * Buffer is not in-core, create new buffer.  The buffer
2356 		 * returned by getnewbuf() is locked.  Note that the returned
2357 		 * buffer is also considered valid (not marked B_INVAL).
2358 		 */
2359 		int bsize, maxsize, vmio;
2360 		off_t offset;
2361 
2362 		if (vn_isdisk(vp, NULL))
2363 			bsize = DEV_BSIZE;
2364 		else if (vp->v_mountedhere)
2365 			bsize = vp->v_mountedhere->mnt_stat.f_iosize;
2366 		else if (vp->v_mount)
2367 			bsize = vp->v_mount->mnt_stat.f_iosize;
2368 		else
2369 			bsize = size;
2370 
2371 		offset = (off_t)blkno * bsize;
2372 		vmio = (VOP_GETVOBJECT(vp, NULL) == 0) && (vp->v_flag & VOBJBUF);
2373 		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
2374 		maxsize = imax(maxsize, bsize);
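		/*
		 * Illustrative numbers (assuming PAGE_SIZE 4096): with
		 * bsize 2048 and blkno 3, offset = 6144, so a VMIO request
		 * of size 2048 gets maxsize = 2048 + (6144 & 4095) = 4096,
		 * enough KVA to map the whole underlying page.
		 */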
2375 
2376 		if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == NULL) {
2377 			if (slpflag || slptimeo) {
2378 				splx(s);
2379 				return NULL;
2380 			}
2381 			goto loop;
2382 		}
2383 
2384 		/*
2385 		 * This code is used to make sure that a buffer is not
2386 		 * created while the getnewbuf routine is blocked.
2387 		 * This can be a problem whether the vnode is locked or not.
2388 		 * If the buffer is created out from under us, we have to
2389 		 * throw away the one we just created.  There is no race
2390 		 * window because we are safely running at splbio() from the
2391 		 * point of the duplicate buffer creation through to here,
2392 		 * and we've locked the buffer.
2393 		 */
2394 		if (gbincore(vp, blkno)) {
2395 			bp->b_flags |= B_INVAL;
2396 			brelse(bp);
2397 			goto loop;
2398 		}
2399 
2400 		/*
2401 		 * Insert the buffer into the hash, so that it can
2402 		 * be found by incore.
2403 		 */
2404 		bp->b_blkno = bp->b_lblkno = blkno;
2405 		bp->b_offset = offset;
2406 
2407 		bgetvp(vp, bp);
2408 		LIST_REMOVE(bp, b_hash);
2409 		bh = bufhash(vp, blkno);
2410 		LIST_INSERT_HEAD(bh, bp, b_hash);
2411 
2412 		/*
2413 		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
2414 		 * buffer size starts out as 0, B_CACHE will be set by
2415 		 * allocbuf() for the VMIO case prior to it testing the
2416 		 * backing store for validity.
2417 		 */
2418 
2419 		if (vmio) {
2420 			bp->b_flags |= B_VMIO;
2421 #if defined(VFS_BIO_DEBUG)
2422 			if (vp->v_type != VREG)
2423 				printf("getblk: vmioing file type %d???\n", vp->v_type);
2424 #endif
2425 		} else {
2426 			bp->b_flags &= ~B_VMIO;
2427 		}
2428 
2429 		allocbuf(bp, size);
2430 
2431 		splx(s);
2432 		bp->b_flags &= ~B_DONE;
2433 	}
2434 	return (bp);
2435 }
2436 
2437 /*
2438  * Get an empty, disassociated buffer of given size.  The buffer is initially
2439  * set to B_INVAL.
2440  */
2441 struct buf *
2442 geteblk(int size)
2443 {
2444 	struct buf *bp;
2445 	int s;
2446 	int maxsize;
2447 
2448 	maxsize = (size + BKVAMASK) & ~BKVAMASK;
2449 
2450 	s = splbio();
2451 	while ((bp = getnewbuf(0, 0, size, maxsize)) == NULL);
2452 	splx(s);
2453 	allocbuf(bp, size);
2454 	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
2455 	return (bp);
2456 }
2457 
2458 
2459 /*
2460  * This code constitutes the buffer memory from either anonymous system
2461  * memory (in the case of non-VMIO operations) or from an associated
2462  * VM object (in the case of VMIO operations).  This code is able to
2463  * resize a buffer up or down.
2464  *
2465  * Note that this code is tricky, and has many complications to resolve
2466  * deadlock or inconsistent data situations.  Tread lightly!!!
2467  * There are B_CACHE and B_DELWRI interactions that must be dealt with by
2468  * the caller.  Calling this code willy nilly can result in the loss of data.
2469  *
2470  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
2471  * B_CACHE for the non-VMIO case.
2472  */
2473 
2474 int
2475 allocbuf(struct buf *bp, int size)
2476 {
2477 	int newbsize, mbsize;
2478 	int i;
2479 
2480 	GIANT_REQUIRED;
2481 
2482 	if (BUF_REFCNT(bp) == 0)
2483 		panic("allocbuf: buffer not busy");
2484 
2485 	if (bp->b_kvasize < size)
2486 		panic("allocbuf: buffer too small");
2487 
2488 	if ((bp->b_flags & B_VMIO) == 0) {
2489 		caddr_t origbuf;
2490 		int origbufsize;
2491 		/*
2492 		 * Just get anonymous memory from the kernel.  Don't
2493 		 * mess with B_CACHE.
2494 		 */
2495 		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2496 		if (bp->b_flags & B_MALLOC)
2497 			newbsize = mbsize;
2498 		else
2499 			newbsize = round_page(size);
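		/*
		 * e.g. (assuming DEV_BSIZE 512 and PAGE_SIZE 4096): a
		 * request for 1000 bytes gives mbsize 1024, so newbsize is
		 * 1024 for a malloced buffer and 4096 otherwise.
		 */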
2500 
2501 		if (newbsize < bp->b_bufsize) {
2502 			/*
2503 			 * malloced buffers are not shrunk
2504 			 */
2505 			if (bp->b_flags & B_MALLOC) {
2506 				if (newbsize) {
2507 					bp->b_bcount = size;
2508 				} else {
2509 					free(bp->b_data, M_BIOBUF);
2510 					if (bp->b_bufsize) {
2511 						bufmallocspace -= bp->b_bufsize;
2512 						bufspacewakeup();
2513 						bp->b_bufsize = 0;
2514 					}
2515 					bp->b_data = bp->b_kvabase;
2516 					bp->b_bcount = 0;
2517 					bp->b_flags &= ~B_MALLOC;
2518 				}
2519 				return 1;
2520 			}
2521 			vm_hold_free_pages(
2522 			    bp,
2523 			    (vm_offset_t) bp->b_data + newbsize,
2524 			    (vm_offset_t) bp->b_data + bp->b_bufsize);
2525 		} else if (newbsize > bp->b_bufsize) {
2526 			/*
2527 			 * We only use malloced memory on the first allocation,
2528 			 * and revert to page-allocated memory when the buffer
2529 			 * grows.
2530 			 */
2531 			if ( (bufmallocspace < maxbufmallocspace) &&
2532 				(bp->b_bufsize == 0) &&
2533 				(mbsize <= PAGE_SIZE/2)) {
2534 
2535 				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
2536 				bp->b_bufsize = mbsize;
2537 				bp->b_bcount = size;
2538 				bp->b_flags |= B_MALLOC;
2539 				bufmallocspace += mbsize;
2540 				return 1;
2541 			}
2542 			origbuf = NULL;
2543 			origbufsize = 0;
2544 			/*
2545 			 * If the buffer is growing on its other-than-first allocation,
2546 			 * then we revert to the page-allocation scheme.
2547 			 */
2548 			if (bp->b_flags & B_MALLOC) {
2549 				origbuf = bp->b_data;
2550 				origbufsize = bp->b_bufsize;
2551 				bp->b_data = bp->b_kvabase;
2552 				if (bp->b_bufsize) {
2553 					bufmallocspace -= bp->b_bufsize;
2554 					bufspacewakeup();
2555 					bp->b_bufsize = 0;
2556 				}
2557 				bp->b_flags &= ~B_MALLOC;
2558 				newbsize = round_page(newbsize);
2559 			}
2560 			vm_hold_load_pages(
2561 			    bp,
2562 			    (vm_offset_t) bp->b_data + bp->b_bufsize,
2563 			    (vm_offset_t) bp->b_data + newbsize);
2564 			if (origbuf) {
2565 				bcopy(origbuf, bp->b_data, origbufsize);
2566 				free(origbuf, M_BIOBUF);
2567 			}
2568 		}
2569 	} else {
2570 		vm_page_t m;
2571 		int desiredpages;
2572 
2573 		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2574 		desiredpages = (size == 0) ? 0 :
2575 			num_pages((bp->b_offset & PAGE_MASK) + newbsize);
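		/*
		 * Worked example (assuming PAGE_SIZE 4096): with
		 * bp->b_offset & PAGE_MASK == 512 and newbsize == 8192 the
		 * buffer spans bytes 512..8703 of its page range, so
		 * desiredpages = num_pages(8704) = 3.
		 */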
2576 
2577 		if (bp->b_flags & B_MALLOC)
2578 			panic("allocbuf: VMIO buffer can't be malloced");
2579 		/*
2580 		 * Set B_CACHE initially if buffer is 0 length or will become
2581 		 * 0-length.
2582 		 */
2583 		if (size == 0 || bp->b_bufsize == 0)
2584 			bp->b_flags |= B_CACHE;
2585 
2586 		if (newbsize < bp->b_bufsize) {
2587 			/*
2588 			 * DEV_BSIZE aligned new buffer size is less than the
2589 			 * DEV_BSIZE aligned existing buffer size.  Figure out
2590 			 * if we have to remove any pages.
2591 			 */
2592 			if (desiredpages < bp->b_npages) {
2593 				for (i = desiredpages; i < bp->b_npages; i++) {
2594 					/*
2595 					 * the page is not freed here -- it
2596 					 * is the responsibility of
2597 					 * vnode_pager_setsize
2598 					 */
2599 					m = bp->b_pages[i];
2600 					KASSERT(m != bogus_page,
2601 					    ("allocbuf: bogus page found"));
2602 					while (vm_page_sleep_busy(m, TRUE, "biodep"))
2603 						;
2604 
2605 					bp->b_pages[i] = NULL;
2606 					vm_page_unwire(m, 0);
2607 				}
2608 				pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
2609 				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
2610 				bp->b_npages = desiredpages;
2611 			}
2612 		} else if (size > bp->b_bcount) {
2613 			/*
2614 			 * We are growing the buffer, possibly in a
2615 			 * byte-granular fashion.
2616 			 */
2617 			struct vnode *vp;
2618 			vm_object_t obj;
2619 			vm_offset_t toff;
2620 			vm_offset_t tinc;
2621 
2622 			/*
2623 			 * Step 1, bring in the VM pages from the object,
2624 			 * allocating them if necessary.  We must clear
2625 			 * B_CACHE if these pages are not valid for the
2626 			 * range covered by the buffer.
2627 			 */
2628 
2629 			vp = bp->b_vp;
2630 			VOP_GETVOBJECT(vp, &obj);
2631 
2632 			while (bp->b_npages < desiredpages) {
2633 				vm_page_t m;
2634 				vm_pindex_t pi;
2635 
2636 				pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
2637 				if ((m = vm_page_lookup(obj, pi)) == NULL) {
2638 					/*
2639 					 * note: must allocate system pages
2640 					 * since blocking here could interfere
2641 					 * with paging I/O, no matter which
2642 					 * process we are.
2643 					 */
2644 					m = vm_page_alloc(obj, pi, VM_ALLOC_SYSTEM);
2645 					if (m == NULL) {
2646 						VM_WAIT;
2647 						vm_pageout_deficit += desiredpages - bp->b_npages;
2648 					} else {
2649 						vm_page_wire(m);
2650 						vm_page_wakeup(m);
2651 						bp->b_flags &= ~B_CACHE;
2652 						bp->b_pages[bp->b_npages] = m;
2653 						++bp->b_npages;
2654 					}
2655 					continue;
2656 				}
2657 
2658 				/*
2659 				 * We found a page.  If we have to sleep on it,
2660 				 * retry because it might have gotten freed out
2661 				 * from under us.
2662 				 *
2663 				 * We can only test PG_BUSY here.  Blocking on
2664 				 * m->busy might lead to a deadlock:
2665 				 *
2666 				 *  vm_fault->getpages->cluster_read->allocbuf
2667 				 *
2668 				 */
2669 
2670 				if (vm_page_sleep_busy(m, FALSE, "pgtblk"))
2671 					continue;
2672 
2673 				/*
2674 				 * We have a good page.  Should we wakeup the
2675 				 * page daemon?
2676 				 */
2677 				if ((curproc != pageproc) &&
2678 				    ((m->queue - m->pc) == PQ_CACHE) &&
2679 				    ((cnt.v_free_count + cnt.v_cache_count) <
2680 					(cnt.v_free_min + cnt.v_cache_min))) {
2681 					pagedaemon_wakeup();
2682 				}
2683 				vm_page_flag_clear(m, PG_ZERO);
2684 				vm_page_wire(m);
2685 				bp->b_pages[bp->b_npages] = m;
2686 				++bp->b_npages;
2687 			}
2688 
2689 			/*
2690 			 * Step 2.  We've loaded the pages into the buffer,
2691 			 * we have to figure out if we can still have B_CACHE
2692 			 * set.  Note that B_CACHE is set according to the
2693 			 * byte-granular range ( bcount and size ), not the
2694 			 * aligned range ( newbsize ).
2695 			 *
2696 			 * The VM test is against m->valid, which is DEV_BSIZE
2697 			 * aligned.  Needless to say, the validity of the data
2698 			 * needs to also be DEV_BSIZE aligned.  Note that this
2699 			 * fails with NFS if the server or some other client
2700 			 * extends the file's EOF.  If our buffer is resized,
2701 			 * B_CACHE may remain set! XXX
2702 			 */
2703 
2704 			toff = bp->b_bcount;
2705 			tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
2706 
2707 			while ((bp->b_flags & B_CACHE) && toff < size) {
2708 				vm_pindex_t pi;
2709 
2710 				if (tinc > (size - toff))
2711 					tinc = size - toff;
2712 
2713 				pi = ((bp->b_offset & PAGE_MASK) + toff) >>
2714 				    PAGE_SHIFT;
2715 
2716 				vfs_buf_test_cache(
2717 				    bp,
2718 				    bp->b_offset,
2719 				    toff,
2720 				    tinc,
2721 				    bp->b_pages[pi]
2722 				);
2723 				toff += tinc;
2724 				tinc = PAGE_SIZE;
2725 			}
2726 
2727 			/*
2728 			 * Step 3, fixup the KVM pmap.  Remember that
2729 			 * bp->b_data is relative to bp->b_offset, but
2730 			 * bp->b_offset may be offset into the first page.
2731 			 */
2732 
2733 			bp->b_data = (caddr_t)
2734 			    trunc_page((vm_offset_t)bp->b_data);
2735 			pmap_qenter(
2736 			    (vm_offset_t)bp->b_data,
2737 			    bp->b_pages,
2738 			    bp->b_npages
2739 			);
2740 
2741 			bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
2742 			    (vm_offset_t)(bp->b_offset & PAGE_MASK));
2743 		}
2744 	}
2745 	if (newbsize < bp->b_bufsize)
2746 		bufspacewakeup();
2747 	bp->b_bufsize = newbsize;	/* actual buffer allocation	*/
2748 	bp->b_bcount = size;		/* requested buffer size	*/
2749 	return 1;
2750 }
2751 
2752 /*
2753  *	bufwait:
2754  *
2755  *	Wait for buffer I/O completion, returning error status.  The buffer
2756  *	is left locked and B_DONE on return.  B_EINTR is converted into a EINTR
2757  *	error and cleared.
2758  */
2759 int
2760 bufwait(register struct buf * bp)
2761 {
2762 	int s;
2763 
2764 	s = splbio();
2765 	while ((bp->b_flags & B_DONE) == 0) {
2766 		if (bp->b_iocmd == BIO_READ)
2767 			tsleep(bp, PRIBIO, "biord", 0);
2768 		else
2769 			tsleep(bp, PRIBIO, "biowr", 0);
2770 	}
2771 	splx(s);
2772 	if (bp->b_flags & B_EINTR) {
2773 		bp->b_flags &= ~B_EINTR;
2774 		return (EINTR);
2775 	}
2776 	if (bp->b_ioflags & BIO_ERROR) {
2777 		return (bp->b_error ? bp->b_error : EIO);
2778 	} else {
2779 		return (0);
2780 	}
2781 }
2782 
2783 /*
2784  * Call back function from struct bio back up to struct buf.
2785  * The corresponding initialization lives in sys/conf.h:DEV_STRATEGY().
2786  */
2787 void
2788 bufdonebio(struct bio *bp)
2789 {
2790 	bufdone(bp->bio_caller2);
2791 }
2792 
2793 /*
2794  *	bufdone:
2795  *
2796  *	Finish I/O on a buffer, optionally calling a completion function.
2797  *	This is usually called from an interrupt so process blocking is
2798  *	not allowed.
2799  *
2800  *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
2801  *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
2802  *	assuming B_INVAL is clear.
2803  *
2804  *	For the VMIO case, we set B_CACHE if the op was a read and no
2805  *	read error occurred, or if the op was a write.  B_CACHE is never
2806  *	set if the buffer is invalid or otherwise uncacheable.
2807  *
2808  *	biodone does not mess with B_INVAL, allowing the I/O routine or the
2809  *	initiator to leave B_INVAL set to brelse the buffer out of existence
2810  *	in the biodone routine.
2811  */
2812 void
2813 bufdone(struct buf *bp)
2814 {
2815 	int s, error;
2816 	void    (*biodone)(struct buf *);
2817 
2818 	GIANT_REQUIRED;
2819 
2820 	s = splbio();
2821 
2822 	KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp, BUF_REFCNT(bp)));
2823 	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
2824 
2825 	bp->b_flags |= B_DONE;
2826 	runningbufwakeup(bp);
2827 
2828 	if (bp->b_iocmd == BIO_DELETE) {
2829 		brelse(bp);
2830 		splx(s);
2831 		return;
2832 	}
2833 
2834 	if (bp->b_iocmd == BIO_WRITE) {
2835 		vwakeup(bp);
2836 	}
2837 
2838 	/* call optional completion function if requested */
2839 	if (bp->b_iodone != NULL) {
2840 		biodone = bp->b_iodone;
2841 		bp->b_iodone = NULL;
2842 		(*biodone) (bp);
2843 		splx(s);
2844 		return;
2845 	}
2846 	if (LIST_FIRST(&bp->b_dep) != NULL)
2847 		buf_complete(bp);
2848 
2849 	if (bp->b_flags & B_VMIO) {
2850 		int i;
2851 		vm_ooffset_t foff;
2852 		vm_page_t m;
2853 		vm_object_t obj;
2854 		int iosize;
2855 		struct vnode *vp = bp->b_vp;
2856 
2857 		error = VOP_GETVOBJECT(vp, &obj);
2858 
2859 #if defined(VFS_BIO_DEBUG)
2860 		if (vp->v_usecount == 0) {
2861 			panic("biodone: zero vnode ref count");
2862 		}
2863 
2864 		if (error) {
2865 			panic("biodone: missing VM object");
2866 		}
2867 
2868 		if ((vp->v_flag & VOBJBUF) == 0) {
2869 			panic("biodone: vnode is not setup for merged cache");
2870 		}
2871 #endif
2872 
2873 		foff = bp->b_offset;
2874 		KASSERT(bp->b_offset != NOOFFSET,
2875 		    ("biodone: no buffer offset"));
2876 
2877 		if (error) {
2878 			panic("biodone: no object");
2879 		}
2880 #if defined(VFS_BIO_DEBUG)
2881 		if (obj->paging_in_progress < bp->b_npages) {
2882 			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
2883 			    obj->paging_in_progress, bp->b_npages);
2884 		}
2885 #endif
2886 
2887 		/*
2888 		 * Set B_CACHE if the op was a normal read and no error
2889 		 * occurred.  B_CACHE is set for writes in the b*write()
2890 		 * routines.
2891 		 */
2892 		iosize = bp->b_bcount - bp->b_resid;
2893 		if (bp->b_iocmd == BIO_READ &&
2894 		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
2895 		    !(bp->b_ioflags & BIO_ERROR)) {
2896 			bp->b_flags |= B_CACHE;
2897 		}
2898 
2899 		for (i = 0; i < bp->b_npages; i++) {
2900 			int bogusflag = 0;
2901 			int resid;
2902 
2903 			resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
2904 			if (resid > iosize)
2905 				resid = iosize;
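			/*
			 * resid is now the number of bytes of this I/O that
			 * fall within the current page; e.g. (assuming
			 * PAGE_SIZE 4096) foff 0x1200 yields 0xe00 before
			 * the clamp against the remaining iosize.
			 */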
2906 
2907 			/*
2908 			 * cleanup bogus pages, restoring the originals
2909 			 */
2910 			m = bp->b_pages[i];
2911 			if (m == bogus_page) {
2912 				bogusflag = 1;
2913 				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
2914 				if (m == NULL)
2915 					panic("biodone: page disappeared!");
2916 				bp->b_pages[i] = m;
2917 				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2918 			}
2919 #if defined(VFS_BIO_DEBUG)
2920 			if (OFF_TO_IDX(foff) != m->pindex) {
2921 				printf(
2922 "biodone: foff(%lu)/m->pindex(%d) mismatch\n",
2923 				    (unsigned long)foff, m->pindex);
2924 			}
2925 #endif
2926 
2927 			/*
2928 			 * In the write case, the valid and clean bits are
2929 			 * already changed correctly ( see bdwrite() ), so we
2930 			 * only need to do this here in the read case.
2931 			 */
2932 			if ((bp->b_iocmd == BIO_READ) && !bogusflag && resid > 0) {
2933 				vfs_page_set_valid(bp, foff, i, m);
2934 			}
2935 			vm_page_flag_clear(m, PG_ZERO);
2936 
2937 			/*
2938 			 * When debugging new filesystems or buffer I/O methods, this
2939 			 * is the most common error that pops up.  If you see this, you
2940 			 * have not set the page busy flag correctly!!!
2941 			 */
2942 			if (m->busy == 0) {
2943 				printf("biodone: page busy < 0, "
2944 				    "pindex: %d, foff: 0x(%x,%x), "
2945 				    "resid: %d, index: %d\n",
2946 				    (int) m->pindex, (int)(foff >> 32),
2947 						(int) foff & 0xffffffff, resid, i);
2948 				if (!vn_isdisk(vp, NULL))
2949 					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
2950 					    bp->b_vp->v_mount->mnt_stat.f_iosize,
2951 					    (int) bp->b_lblkno,
2952 					    bp->b_flags, bp->b_npages);
2953 				else
2954 					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
2955 					    (int) bp->b_lblkno,
2956 					    bp->b_flags, bp->b_npages);
2957 				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
2958 				    m->valid, m->dirty, m->wire_count);
2959 				panic("biodone: page busy < 0");
2960 			}
2961 			vm_page_io_finish(m);
2962 			vm_object_pip_subtract(obj, 1);
2963 			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
2964 			iosize -= resid;
2965 		}
2966 		if (obj)
2967 			vm_object_pip_wakeupn(obj, 0);
2968 	}
2969 
2970 	/*
2971 	 * For asynchronous completions, release the buffer now. The brelse
2972 	 * will do a wakeup there if necessary - so no need to do a wakeup
2973 	 * here in the async case. The sync case always needs to do a wakeup.
2974 	 */
2975 
2976 	if (bp->b_flags & B_ASYNC) {
2977 		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) || (bp->b_ioflags & BIO_ERROR))
2978 			brelse(bp);
2979 		else
2980 			bqrelse(bp);
2981 	} else {
2982 		wakeup(bp);
2983 	}
2984 	splx(s);
2985 }
2986 
2987 /*
2988  * This routine is called in lieu of iodone in the case of
2989  * incomplete I/O.  This keeps the busy status for pages
2990  * consistent.
2991  */
2992 void
2993 vfs_unbusy_pages(struct buf * bp)
2994 {
2995 	int i;
2996 
2997 	GIANT_REQUIRED;
2998 
2999 	runningbufwakeup(bp);
3000 	if (bp->b_flags & B_VMIO) {
3001 		struct vnode *vp = bp->b_vp;
3002 		vm_object_t obj;
3003 
3004 		VOP_GETVOBJECT(vp, &obj);
3005 
3006 		for (i = 0; i < bp->b_npages; i++) {
3007 			vm_page_t m = bp->b_pages[i];
3008 
3009 			if (m == bogus_page) {
3010 				m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
3011 				if (!m) {
3012 					panic("vfs_unbusy_pages: page missing");
3013 				}
3014 				bp->b_pages[i] = m;
3015 				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
3016 			}
3017 			vm_object_pip_subtract(obj, 1);
3018 			vm_page_flag_clear(m, PG_ZERO);
3019 			vm_page_io_finish(m);
3020 		}
3021 		vm_object_pip_wakeupn(obj, 0);
3022 	}
3023 }
3024 
3025 /*
3026  * vfs_page_set_valid:
3027  *
3028  *	Set the valid bits in a page based on the supplied offset.   The
3029  *	range is restricted to the buffer's size.
3030  *
3031  *	This routine is typically called after a read completes.
3032  */
3033 static void
3034 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
3035 {
3036 	vm_ooffset_t soff, eoff;
3037 
3038 	GIANT_REQUIRED;
3039 	/*
3040 	 * Start and end offsets in buffer.  eoff - soff may not cross a
3041 	 * page boundary or cross the end of the buffer.  The end of the
3042 	 * buffer, in this case, is our file EOF, not the allocation size
3043 	 * of the buffer.
3044 	 */
3045 	soff = off;
3046 	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3047 	if (eoff > bp->b_offset + bp->b_bcount)
3048 		eoff = bp->b_offset + bp->b_bcount;
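	/*
	 * Worked example (assuming PAGE_SIZE 4096): for a buffer with
	 * b_offset 0 and b_bcount 6144, the page at off 4096 has eoff
	 * clipped from 8192 down to 6144, so only its first 2048 bytes
	 * are marked valid and clean below.
	 */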
3049 
3050 	/*
3051 	 * Set valid range.  This is typically the entire buffer and thus the
3052 	 * entire page.
3053 	 */
3054 	if (eoff > soff) {
3055 		vm_page_set_validclean(
3056 		    m,
3057 		   (vm_offset_t) (soff & PAGE_MASK),
3058 		   (vm_offset_t) (eoff - soff)
3059 		);
3060 	}
3061 }
3062 
3063 /*
3064  * This routine is called before a device strategy routine.
3065  * It is used to tell the VM system that paging I/O is in
3066  * progress, and treat the pages associated with the buffer
3067  * almost as being PG_BUSY.  Also the object paging_in_progress
3068  * flag is handled to make sure that the object doesn't become
3069  * inconsistent.
3070  *
3071  * Since I/O has not been initiated yet, certain buffer flags
3072  * such as BIO_ERROR or B_INVAL may be in an inconsistent state
3073  * and should be ignored.
3074  */
3075 void
3076 vfs_busy_pages(struct buf * bp, int clear_modify)
3077 {
3078 	int i, bogus;
3079 
3080 	GIANT_REQUIRED;
3081 
3082 	if (bp->b_flags & B_VMIO) {
3083 		struct vnode *vp = bp->b_vp;
3084 		vm_object_t obj;
3085 		vm_ooffset_t foff;
3086 
3087 		VOP_GETVOBJECT(vp, &obj);
3088 		foff = bp->b_offset;
3089 		KASSERT(bp->b_offset != NOOFFSET,
3090 		    ("vfs_busy_pages: no buffer offset"));
3091 		vfs_setdirty(bp);
3092 
3093 retry:
3094 		for (i = 0; i < bp->b_npages; i++) {
3095 			vm_page_t m = bp->b_pages[i];
3096 			if (vm_page_sleep_busy(m, FALSE, "vbpage"))
3097 				goto retry;
3098 		}
3099 
3100 		bogus = 0;
3101 		for (i = 0; i < bp->b_npages; i++) {
3102 			vm_page_t m = bp->b_pages[i];
3103 
3104 			vm_page_flag_clear(m, PG_ZERO);
3105 			if ((bp->b_flags & B_CLUSTER) == 0) {
3106 				vm_object_pip_add(obj, 1);
3107 				vm_page_io_start(m);
3108 			}
3109 
3110 			/*
3111 			 * When readying a buffer for a read ( i.e.
3112 			 * clear_modify == 0 ), it is important to do
3113 			 * bogus_page replacement for valid pages in
3114 			 * partially instantiated buffers.  Partially
3115 			 * instantiated buffers can, in turn, occur when
3116 			 * reconstituting a buffer from its VM backing store
3117 			 * base.  We only have to do this if B_CACHE is
3118 			 * clear ( which causes the I/O to occur in the
3119 			 * first place ).  The replacement prevents the read
3120 			 * I/O from overwriting potentially dirty VM-backed
3121 			 * pages.  XXX bogus page replacement is, uh, bogus.
3122 			 * It may not work properly with small-block devices.
3123 			 * We need to find a better way.
3124 			 */
3125 
3126 			vm_page_protect(m, VM_PROT_NONE);
3127 			if (clear_modify)
3128 				vfs_page_set_valid(bp, foff, i, m);
3129 			else if (m->valid == VM_PAGE_BITS_ALL &&
3130 				(bp->b_flags & B_CACHE) == 0) {
3131 				bp->b_pages[i] = bogus_page;
3132 				bogus++;
3133 			}
3134 			foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3135 		}
3136 		if (bogus)
3137 			pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
3138 	}
3139 }
3140 
3141 /*
3142  * Tell the VM system that the pages associated with this buffer
3143  * are clean.  This is used for delayed writes where the data is
3144  * going to go to disk eventually without additional VM intervention.
3145  *
3146  * Note that while we only really need to clean through to b_bcount, we
3147  * just go ahead and clean through to b_bufsize.
3148  */
3149 static void
3150 vfs_clean_pages(struct buf * bp)
3151 {
3152 	int i;
3153 
3154 	GIANT_REQUIRED;
3155 
3156 	if (bp->b_flags & B_VMIO) {
3157 		vm_ooffset_t foff;
3158 
3159 		foff = bp->b_offset;
3160 		KASSERT(bp->b_offset != NOOFFSET,
3161 		    ("vfs_clean_pages: no buffer offset"));
3162 		for (i = 0; i < bp->b_npages; i++) {
3163 			vm_page_t m = bp->b_pages[i];
3164 			vm_ooffset_t noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3165 			vm_ooffset_t eoff = noff;
3166 
3167 			if (eoff > bp->b_offset + bp->b_bufsize)
3168 				eoff = bp->b_offset + bp->b_bufsize;
3169 			vfs_page_set_valid(bp, foff, i, m);
3170 			/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
3171 			foff = noff;
3172 		}
3173 	}
3174 }
3175 
3176 /*
3177  *	vfs_bio_set_validclean:
3178  *
3179  *	Set the range within the buffer to valid and clean.  The range is
3180  *	relative to the beginning of the buffer, b_offset.  Note that b_offset
3181  *	itself may be offset from the beginning of the first page.
3182  *
3183  */
3184 
3185 void
3186 vfs_bio_set_validclean(struct buf *bp, int base, int size)
3187 {
3188 	if (bp->b_flags & B_VMIO) {
3189 		int i;
3190 		int n;
3191 
3192 		/*
3193 		 * Fixup base to be relative to beginning of first page.
3194 		 * Set initial n to be the maximum number of bytes in the
3195 		 * first page that can be validated.
3196 		 */
3197 
3198 		base += (bp->b_offset & PAGE_MASK);
3199 		n = PAGE_SIZE - (base & PAGE_MASK);
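		/*
		 * e.g. (assuming PAGE_SIZE 4096): with bp->b_offset &
		 * PAGE_MASK == 512 and base == 0, base becomes 512 and n
		 * starts at 3584, the bytes of the first page this buffer
		 * actually covers.
		 */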
3200 
3201 		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
3202 			vm_page_t m = bp->b_pages[i];
3203 
3204 			if (n > size)
3205 				n = size;
3206 
3207 			vm_page_set_validclean(m, base & PAGE_MASK, n);
3208 			base += n;
3209 			size -= n;
3210 			n = PAGE_SIZE;
3211 		}
3212 	}
3213 }
3214 
3215 /*
3216  *	vfs_bio_clrbuf:
3217  *
3218  *	clear a buffer.  This routine essentially fakes an I/O, so we need
3219  *	to clear BIO_ERROR and B_INVAL.
3220  *
3221  *	Note that while we only theoretically need to clear through b_bcount,
3222  *	we go ahead and clear through b_bufsize.
3223  */
3224 
3225 void
3226 vfs_bio_clrbuf(struct buf *bp) {
3227 	int i, mask = 0;
3228 	caddr_t sa, ea;
3229 
3230 	GIANT_REQUIRED;
3231 
3232 	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
3233 		bp->b_flags &= ~B_INVAL;
3234 		bp->b_ioflags &= ~BIO_ERROR;
3235 		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
3236 		    (bp->b_offset & PAGE_MASK) == 0) {
3237 			mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
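			/*
			 * e.g. (assuming DEV_BSIZE 512): a 2048 byte buffer
			 * gives mask = (1 << 4) - 1 = 0xf, one valid bit per
			 * DEV_BSIZE chunk of the single page.
			 */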
3238 			if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
3239 			    ((bp->b_pages[0]->valid & mask) != mask)) {
3240 				bzero(bp->b_data, bp->b_bufsize);
3241 			}
3242 			bp->b_pages[0]->valid |= mask;
3243 			bp->b_resid = 0;
3244 			return;
3245 		}
3246 		ea = sa = bp->b_data;
3247 		for (i = 0; i < bp->b_npages; i++, sa = ea) {
3248 			int j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
3249 			ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
3250 			ea = (caddr_t)(vm_offset_t)ulmin(
3251 			    (u_long)(vm_offset_t)ea,
3252 			    (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
3253 			mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
3254 			if ((bp->b_pages[i]->valid & mask) == mask)
3255 				continue;
3256 			if ((bp->b_pages[i]->valid & mask) == 0) {
3257 				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
3258 					bzero(sa, ea - sa);
3259 				}
3260 			} else {
3261 				for (; sa < ea; sa += DEV_BSIZE, j++) {
3262 					if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
3263 						(bp->b_pages[i]->valid & (1<<j)) == 0)
3264 						bzero(sa, DEV_BSIZE);
3265 				}
3266 			}
3267 			bp->b_pages[i]->valid |= mask;
3268 			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
3269 		}
3270 		bp->b_resid = 0;
3271 	} else {
3272 		clrbuf(bp);
3273 	}
3274 }
3275 
3276 /*
3277  * vm_hold_load_pages and vm_hold_free_pages get pages into
3278  * a buffer's address space.  The pages are anonymous and are
3279  * not associated with a file object.
3280  */
3281 static void
3282 vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
3283 {
3284 	vm_offset_t pg;
3285 	vm_page_t p;
3286 	int index;
3287 
3288 	GIANT_REQUIRED;
3289 
3290 	to = round_page(to);
3291 	from = round_page(from);
3292 	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3293 
3294 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
3295 tryagain:
3296 		/*
3297 		 * note: must allocate system pages since blocking here
3298 		 * could interfere with paging I/O, no matter which
3299 		 * process we are.
3300 		 */
3301 		p = vm_page_alloc(kernel_object,
3302 			((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
3303 		    VM_ALLOC_SYSTEM);
3304 		if (!p) {
3305 			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
3306 			VM_WAIT;
3307 			goto tryagain;
3308 		}
3309 		vm_page_wire(p);
3310 		p->valid = VM_PAGE_BITS_ALL;
3311 		vm_page_flag_clear(p, PG_ZERO);
3312 		pmap_qenter(pg, &p, 1);
3313 		bp->b_pages[index] = p;
3314 		vm_page_wakeup(p);
3315 	}
3316 	bp->b_npages = index;
3317 }
3318 
3319 /* Return pages associated with this buf to the vm system */
3320 void
3321 vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
3322 {
3323 	vm_offset_t pg;
3324 	vm_page_t p;
3325 	int index, newnpages;
3326 
3327 	GIANT_REQUIRED;
3328 
3329 	from = round_page(from);
3330 	to = round_page(to);
3331 	newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3332 
3333 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
3334 		p = bp->b_pages[index];
3335 		if (p && (index < bp->b_npages)) {
3336 			if (p->busy) {
3337 				printf(
3338 			    "vm_hold_free_pages: blkno: %lld, lblkno: %lld\n",
3339 				    (long long)bp->b_blkno,
3340 				    (long long)bp->b_lblkno);
3341 			}
3342 			bp->b_pages[index] = NULL;
3343 			pmap_qremove(pg, 1);
3344 			vm_page_busy(p);
3345 			vm_page_unwire(p, 0);
3346 			vm_page_free(p);
3347 		}
3348 	}
3349 	bp->b_npages = newnpages;
3350 }
3351 
3352 
3353 #include "opt_ddb.h"
3354 #ifdef DDB
3355 #include <ddb/ddb.h>
3356 
3357 /* DDB command to show buffer data */
3358 DB_SHOW_COMMAND(buffer, db_show_buffer)
3359 {
3360 	/* get args */
3361 	struct buf *bp = (struct buf *)addr;
3362 
3363 	if (!have_addr) {
3364 		db_printf("usage: show buffer <addr>\n");
3365 		return;
3366 	}
3367 
3368 	db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
3369 	db_printf(
3370 	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
3371 	    "b_dev = (%d,%d), b_data = %p, b_blkno = %lld, b_pblkno = %lld\n",
3372 	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
3373 	    major(bp->b_dev), minor(bp->b_dev), bp->b_data,
3374 	    (long long)bp->b_blkno, (long long)bp->b_pblkno);
3375 	if (bp->b_npages) {
3376 		int i;
3377 		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
3378 		for (i = 0; i < bp->b_npages; i++) {
3379 			vm_page_t m;
3380 			m = bp->b_pages[i];
3381 			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
3382 			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
3383 			if ((i + 1) < bp->b_npages)
3384 				db_printf(",");
3385 		}
3386 		db_printf("\n");
3387 	}
3388 }
3389 #endif /* DDB */
3390