xref: /freebsd/sys/kern/vfs_bio.c (revision 5129159789cc9d7bc514e4546b88e3427695002d)
1 /*
2  * Copyright (c) 1994,1997 John S. Dyson
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice immediately at the beginning of the file, without modification,
10  *    this list of conditions, and the following disclaimer.
11  * 2. Absolutely no warranty of function or purpose is made by the author
12  *		John S. Dyson.
13  *
14  * $FreeBSD$
15  */
16 
17 /*
18  * this file contains a new buffer I/O scheme implementing a coherent
19  * VM object and buffer cache scheme.  Pains have been taken to make
20  * sure that the performance degradation associated with schemes such
21  * as this is not realized.
22  *
23  * Author:  John S. Dyson
24  * Significant help during the development and debugging phases
25  * had been provided by David Greenman, also of the FreeBSD core team.
26  *
27  * see man buf(9) for more info.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/kernel.h>
33 #include <sys/sysctl.h>
34 #include <sys/proc.h>
35 #include <sys/kthread.h>
36 #include <sys/vnode.h>
37 #include <sys/vmmeter.h>
38 #include <sys/lock.h>
39 #include <vm/vm.h>
40 #include <vm/vm_param.h>
41 #include <vm/vm_kern.h>
42 #include <vm/vm_pageout.h>
43 #include <vm/vm_page.h>
44 #include <vm/vm_object.h>
45 #include <vm/vm_extern.h>
46 #include <vm/vm_map.h>
47 #include <sys/buf.h>
48 #include <sys/mount.h>
49 #include <sys/malloc.h>
50 #include <sys/resourcevar.h>
51 #include <sys/conf.h>
52 
53 static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");
54 
55 struct	bio_ops bioops;		/* I/O operation notification */
56 
57 struct buf *buf;		/* buffer header pool */
58 struct swqueue bswlist;
59 
60 static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
61 		vm_offset_t to);
62 static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
63 		vm_offset_t to);
64 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
65 			       int pageno, vm_page_t m);
66 static void vfs_clean_pages(struct buf * bp);
67 static void vfs_setdirty(struct buf *bp);
68 static void vfs_vmio_release(struct buf *bp);
69 static int flushbufqueues(void);
70 
71 static int bd_request;
72 
73 static void buf_daemon __P((void));
74 /*
75  * bogus page -- for I/O to/from partially complete buffers
76  * this is a temporary solution to the problem, but it is not
77  * really that bad.  it would be better to split the buffer
78  * for input in the case of buffers partially already in memory,
79  * but the code is intricate enough already.
80  */
81 vm_page_t bogus_page;
82 int runningbufspace;
83 int vmiodirenable = FALSE;
84 int buf_maxio = DFLTPHYS;
85 static vm_offset_t bogus_offset;
86 
87 static int bufspace, maxbufspace, vmiospace,
88 	bufmallocspace, maxbufmallocspace, hibufspace;
89 static int maxbdrun;
90 static int needsbuffer;
91 static int numdirtybuffers, hidirtybuffers;
92 static int numfreebuffers, lofreebuffers, hifreebuffers;
93 static int getnewbufcalls;
94 static int getnewbufrestarts;
95 static int kvafreespace;
96 
97 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
98 	&numdirtybuffers, 0, "");
99 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
100 	&hidirtybuffers, 0, "");
101 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
102 	&numfreebuffers, 0, "");
103 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
104 	&lofreebuffers, 0, "");
105 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
106 	&hifreebuffers, 0, "");
107 SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD,
108 	&runningbufspace, 0, "");
109 SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
110 	&maxbufspace, 0, "");
111 SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD,
112 	&hibufspace, 0, "");
113 SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
114 	&bufspace, 0, "");
115 SYSCTL_INT(_vfs, OID_AUTO, maxbdrun, CTLFLAG_RW,
116 	&maxbdrun, 0, "");
117 SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
118 	&vmiospace, 0, "");
119 SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
120 	&maxbufmallocspace, 0, "");
121 SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
122 	&bufmallocspace, 0, "");
123 SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
124 	&kvafreespace, 0, "");
125 SYSCTL_INT(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RW,
126 	&getnewbufcalls, 0, "");
127 SYSCTL_INT(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RW,
128 	&getnewbufrestarts, 0, "");
129 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW,
130 	&vmiodirenable, 0, "");
131 
132 
133 static int bufhashmask;
134 static LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
135 struct bqueues bufqueues[BUFFER_QUEUES] = { { 0 } };
136 char *buf_wmesg = BUF_WMESG;
137 
138 extern int vm_swap_size;
139 
140 #define BUF_MAXUSE		24
141 
142 #define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
143 #define VFS_BIO_NEED_DIRTYFLUSH	0x02	/* waiting for dirty buffer flush */
144 #define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
145 #define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */
146 #define VFS_BIO_NEED_KVASPACE	0x10	/* wait for buffer_map space, emerg  */
147 
148 /*
149  * Buffer hash table code.  Note that the logical block scans linearly, which
150  * gives us some L1 cache locality.
151  */
152 
153 static __inline
154 struct bufhashhdr *
155 bufhash(struct vnode *vnp, daddr_t bn)
156 {
157 	return(&bufhashtbl[(((uintptr_t)(vnp) >> 7) + (int)bn) & bufhashmask]);
158 }
159 
160 /*
161  *	kvaspacewakeup:
162  *
163  *	Called when kva space is potential available for recovery or when
164  *	kva space is recovered in the buffer_map.  This function wakes up
165  *	anyone waiting for buffer_map kva space.  Even though the buffer_map
166  *	is larger then maxbufspace, this situation will typically occur
167  *	when the buffer_map gets fragmented.
168  */
169 
170 static __inline void
171 kvaspacewakeup(void)
172 {
173 	/*
174 	 * If someone is waiting for KVA space, wake them up.  Even
175 	 * though we haven't freed the kva space yet, the waiting
176 	 * process will be able to now.
177 	 */
178 	if (needsbuffer & VFS_BIO_NEED_KVASPACE) {
179 		needsbuffer &= ~VFS_BIO_NEED_KVASPACE;
180 		wakeup(&needsbuffer);
181 	}
182 }
183 
184 /*
185  *	numdirtywakeup:
186  *
187  *	If someone is blocked due to there being too many dirty buffers,
188  *	and numdirtybuffers is now reasonable, wake them up.
189  */
190 
191 static __inline void
192 numdirtywakeup(void)
193 {
194 	if (numdirtybuffers < hidirtybuffers) {
195 		if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
196 			needsbuffer &= ~VFS_BIO_NEED_DIRTYFLUSH;
197 			wakeup(&needsbuffer);
198 		}
199 	}
200 }
201 
202 /*
203  *	bufspacewakeup:
204  *
205  *	Called when buffer space is potentially available for recovery or when
206  *	buffer space is recovered.  getnewbuf() will block on this flag when
207  *	it is unable to free sufficient buffer space.  Buffer space becomes
208  *	recoverable when bp's get placed back in the queues.
209  */
210 
211 static __inline void
212 bufspacewakeup(void)
213 {
214 	/*
215 	 * If someone is waiting for BUF space, wake them up.  Even
216 	 * though we haven't freed the kva space yet, the waiting
217 	 * process will be able to now.
218 	 */
219 	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
220 		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
221 		wakeup(&needsbuffer);
222 	}
223 }
224 
225 /*
226  *	bufcountwakeup:
227  *
228  *	Called when a buffer has been added to one of the free queues to
229  *	account for the buffer and to wakeup anyone waiting for free buffers.
230  *	This typically occurs when large amounts of metadata are being handled
231  *	by the buffer cache ( else buffer space runs out first, usually ).
232  */
233 
234 static __inline void
235 bufcountwakeup(void)
236 {
237 	++numfreebuffers;
238 	if (needsbuffer) {
239 		needsbuffer &= ~VFS_BIO_NEED_ANY;
240 		if (numfreebuffers >= hifreebuffers)
241 			needsbuffer &= ~VFS_BIO_NEED_FREE;
242 		wakeup(&needsbuffer);
243 	}
244 }
245 
246 /*
247  *	vfs_buf_test_cache:
248  *
249  *	Called when a buffer is extended.  This function clears the B_CACHE
250  *	bit if the newly extended portion of the buffer does not contain
251  *	valid data.
252  */
253 static __inline__
254 void
255 vfs_buf_test_cache(struct buf *bp,
256 		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
257 		  vm_page_t m)
258 {
259 	if (bp->b_flags & B_CACHE) {
260 		int base = (foff + off) & PAGE_MASK;
261 		if (vm_page_is_valid(m, base, size) == 0)
262 			bp->b_flags &= ~B_CACHE;
263 	}
264 }
265 
266 static __inline__
267 void
268 bd_wakeup(int dirtybuflevel)
269 {
270 	if (numdirtybuffers >= dirtybuflevel && bd_request == 0) {
271 		bd_request = 1;
272 		wakeup(&bd_request);
273 	}
274 }
275 
276 /*
277  * bd_speedup - speedup the buffer cache flushing code
278  */
279 
280 static __inline__
281 void
282 bd_speedup(void)
283 {
284 	bd_wakeup(1);
285 }
286 
287 /*
288  * Initialize buffer headers and related structures.
289  */
290 
291 caddr_t
292 bufhashinit(caddr_t vaddr)
293 {
294 	/* first, make a null hash table */
295 	for (bufhashmask = 8; bufhashmask < nbuf / 4; bufhashmask <<= 1)
296 		;
297 	bufhashtbl = (void *)vaddr;
298 	vaddr = vaddr + sizeof(*bufhashtbl) * bufhashmask;
299 	--bufhashmask;
300 	return(vaddr);
301 }
302 
303 void
304 bufinit(void)
305 {
306 	struct buf *bp;
307 	int i;
308 
309 	TAILQ_INIT(&bswlist);
310 	LIST_INIT(&invalhash);
311 	simple_lock_init(&buftimelock);
312 
313 	for (i = 0; i <= bufhashmask; i++)
314 		LIST_INIT(&bufhashtbl[i]);
315 
316 	/* next, make a null set of free lists */
317 	for (i = 0; i < BUFFER_QUEUES; i++)
318 		TAILQ_INIT(&bufqueues[i]);
319 
320 	/* finally, initialize each buffer header and stick on empty q */
321 	for (i = 0; i < nbuf; i++) {
322 		bp = &buf[i];
323 		bzero(bp, sizeof *bp);
324 		bp->b_flags = B_INVAL;	/* we're just an empty header */
325 		bp->b_dev = NODEV;
326 		bp->b_rcred = NOCRED;
327 		bp->b_wcred = NOCRED;
328 		bp->b_qindex = QUEUE_EMPTY;
329 		bp->b_xflags = 0;
330 		LIST_INIT(&bp->b_dep);
331 		BUF_LOCKINIT(bp);
332 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
333 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
334 	}
335 
336 	/*
337 	 * maxbufspace is currently calculated to be maximally efficient
338 	 * when the filesystem block size is DFLTBSIZE or DFLTBSIZE*2
339 	 * (4K or 8K).  To reduce the number of stall points our calculation
340 	 * is based on DFLTBSIZE which should reduce the chances of actually
341 	 * running out of buffer headers.  The maxbufspace calculation is also
342 	 * based on DFLTBSIZE (4K) instead of BKVASIZE (8K) in order to
343 	 * reduce the chance that a KVA allocation will fail due to
344 	 * fragmentation.  While this does not usually create a stall,
345 	 * the KVA map allocation/free functions are O(N) rather then O(1)
346 	 * so running them constantly would result in inefficient O(N*M)
347 	 * buffer cache operation.
348 	 */
349 	maxbufspace = (nbuf + 8) * DFLTBSIZE;
350 	hibufspace = imax(3 * maxbufspace / 4, maxbufspace - MAXBSIZE * 5);
351 /*
352  * Limit the amount of malloc memory since it is wired permanently into
353  * the kernel space.  Even though this is accounted for in the buffer
354  * allocation, we don't want the malloced region to grow uncontrolled.
355  * The malloc scheme improves memory utilization significantly on average
356  * (small) directories.
357  */
358 	maxbufmallocspace = hibufspace / 20;
359 
360 /*
361  * Reduce the chance of a deadlock occuring by limiting the number
362  * of delayed-write dirty buffers we allow to stack up.
363  */
364 	hidirtybuffers = nbuf / 4 + 20;
365 	numdirtybuffers = 0;
366 /*
367  * To support extreme low-memory systems, make sure hidirtybuffers cannot
368  * eat up all available buffer space.  This occurs when our minimum cannot
369  * be met.  We try to size hidirtybuffers to 3/4 our buffer space assuming
370  * BKVASIZE'd (8K) buffers.  We also reduce buf_maxio in this case (used
371  * by the clustering code) in an attempt to further reduce the load on
372  * the buffer cache.
373  */
374 	while (hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
375 		hidirtybuffers >>= 1;
376 		buf_maxio >>= 1;
377 	}
378 
379 	/*
380 	 * Temporary, BKVASIZE may be manipulated soon, make sure we don't
381 	 * do something illegal. XXX
382 	 */
383 #if BKVASIZE < MAXBSIZE
384 	if (buf_maxio < BKVASIZE * 2)
385 		buf_maxio = BKVASIZE * 2;
386 #else
387 	if (buf_maxio < MAXBSIZE)
388 		buf_maxio = MAXBSIZE;
389 #endif
390 
391 /*
392  * Try to keep the number of free buffers in the specified range,
393  * and give the syncer access to an emergency reserve.
394  */
395 	lofreebuffers = nbuf / 18 + 5;
396 	hifreebuffers = 2 * lofreebuffers;
397 	numfreebuffers = nbuf;
398 
399 /*
400  * Maximum number of async ops initiated per buf_daemon loop.  This is
401  * somewhat of a hack at the moment, we really need to limit ourselves
402  * based on the number of bytes of I/O in-transit that were initiated
403  * from buf_daemon.
404  */
405 	if ((maxbdrun = nswbuf / 4) < 4)
406 		maxbdrun = 4;
407 
408 	kvafreespace = 0;
409 
410 	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
411 	bogus_page = vm_page_alloc(kernel_object,
412 			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
413 			VM_ALLOC_NORMAL);
414 	cnt.v_wire_count++;
415 
416 }
417 
418 /*
419  * Free the kva allocation for a buffer
420  * Must be called only at splbio or higher,
421  *  as this is the only locking for buffer_map.
422  */
423 static void
424 bfreekva(struct buf * bp)
425 {
426 	if (bp->b_kvasize) {
427 		vm_map_delete(buffer_map,
428 		    (vm_offset_t) bp->b_kvabase,
429 		    (vm_offset_t) bp->b_kvabase + bp->b_kvasize
430 		);
431 		bp->b_kvasize = 0;
432 		kvaspacewakeup();
433 	}
434 }
435 
436 /*
437  *	bremfree:
438  *
439  *	Remove the buffer from the appropriate free list.
440  */
441 void
442 bremfree(struct buf * bp)
443 {
444 	int s = splbio();
445 	int old_qindex = bp->b_qindex;
446 
447 	if (bp->b_qindex != QUEUE_NONE) {
448 		if (bp->b_qindex == QUEUE_EMPTYKVA) {
449 			kvafreespace -= bp->b_kvasize;
450 		}
451 		KASSERT(BUF_REFCNT(bp) == 1, ("bremfree: bp %p not locked",bp));
452 		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
453 		bp->b_qindex = QUEUE_NONE;
454 		runningbufspace += bp->b_bufsize;
455 	} else {
456 #if !defined(MAX_PERF)
457 		if (BUF_REFCNT(bp) <= 1)
458 			panic("bremfree: removing a buffer not on a queue");
459 #endif
460 	}
461 
462 	/*
463 	 * Fixup numfreebuffers count.  If the buffer is invalid or not
464 	 * delayed-write, and it was on the EMPTY, LRU, or AGE queues,
465 	 * the buffer was free and we must decrement numfreebuffers.
466 	 */
467 	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
468 		switch(old_qindex) {
469 		case QUEUE_DIRTY:
470 		case QUEUE_CLEAN:
471 		case QUEUE_EMPTY:
472 		case QUEUE_EMPTYKVA:
473 			--numfreebuffers;
474 			break;
475 		default:
476 			break;
477 		}
478 	}
479 	splx(s);
480 }
481 
482 
483 /*
484  * Get a buffer with the specified data.  Look in the cache first.  We
485  * must clear B_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
486  * is set, the buffer is valid and we do not have to do anything ( see
487  * getblk() ).
488  */
489 int
490 bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
491     struct buf ** bpp)
492 {
493 	struct buf *bp;
494 
495 	bp = getblk(vp, blkno, size, 0, 0);
496 	*bpp = bp;
497 
498 	/* if not found in cache, do some I/O */
499 	if ((bp->b_flags & B_CACHE) == 0) {
500 		if (curproc != NULL)
501 			curproc->p_stats->p_ru.ru_inblock++;
502 		KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp));
503 		bp->b_flags |= B_READ;
504 		bp->b_flags &= ~(B_ERROR | B_INVAL);
505 		if (bp->b_rcred == NOCRED) {
506 			if (cred != NOCRED)
507 				crhold(cred);
508 			bp->b_rcred = cred;
509 		}
510 		vfs_busy_pages(bp, 0);
511 		VOP_STRATEGY(vp, bp);
512 		return (biowait(bp));
513 	}
514 	return (0);
515 }
516 
517 /*
518  * Operates like bread, but also starts asynchronous I/O on
519  * read-ahead blocks.  We must clear B_ERROR and B_INVAL prior
520  * to initiating I/O . If B_CACHE is set, the buffer is valid
521  * and we do not have to do anything.
522  */
523 int
524 breadn(struct vnode * vp, daddr_t blkno, int size,
525     daddr_t * rablkno, int *rabsize,
526     int cnt, struct ucred * cred, struct buf ** bpp)
527 {
528 	struct buf *bp, *rabp;
529 	int i;
530 	int rv = 0, readwait = 0;
531 
532 	*bpp = bp = getblk(vp, blkno, size, 0, 0);
533 
534 	/* if not found in cache, do some I/O */
535 	if ((bp->b_flags & B_CACHE) == 0) {
536 		if (curproc != NULL)
537 			curproc->p_stats->p_ru.ru_inblock++;
538 		bp->b_flags |= B_READ;
539 		bp->b_flags &= ~(B_ERROR | B_INVAL);
540 		if (bp->b_rcred == NOCRED) {
541 			if (cred != NOCRED)
542 				crhold(cred);
543 			bp->b_rcred = cred;
544 		}
545 		vfs_busy_pages(bp, 0);
546 		VOP_STRATEGY(vp, bp);
547 		++readwait;
548 	}
549 
550 	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
551 		if (inmem(vp, *rablkno))
552 			continue;
553 		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
554 
555 		if ((rabp->b_flags & B_CACHE) == 0) {
556 			if (curproc != NULL)
557 				curproc->p_stats->p_ru.ru_inblock++;
558 			rabp->b_flags |= B_READ | B_ASYNC;
559 			rabp->b_flags &= ~(B_ERROR | B_INVAL);
560 			if (rabp->b_rcred == NOCRED) {
561 				if (cred != NOCRED)
562 					crhold(cred);
563 				rabp->b_rcred = cred;
564 			}
565 			vfs_busy_pages(rabp, 0);
566 			BUF_KERNPROC(rabp);
567 			VOP_STRATEGY(vp, rabp);
568 		} else {
569 			brelse(rabp);
570 		}
571 	}
572 
573 	if (readwait) {
574 		rv = biowait(bp);
575 	}
576 	return (rv);
577 }
578 
579 /*
580  * Write, release buffer on completion.  (Done by iodone
581  * if async).  Do not bother writing anything if the buffer
582  * is invalid.
583  *
584  * Note that we set B_CACHE here, indicating that buffer is
585  * fully valid and thus cacheable.  This is true even of NFS
586  * now so we set it generally.  This could be set either here
587  * or in biodone() since the I/O is synchronous.  We put it
588  * here.
589  */
590 int
591 bwrite(struct buf * bp)
592 {
593 	int oldflags, s;
594 
595 	if (bp->b_flags & B_INVAL) {
596 		brelse(bp);
597 		return (0);
598 	}
599 
600 	oldflags = bp->b_flags;
601 
602 #if !defined(MAX_PERF)
603 	if (BUF_REFCNT(bp) == 0)
604 		panic("bwrite: buffer is not busy???");
605 #endif
606 	s = splbio();
607 	bundirty(bp);
608 
609 	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
610 	bp->b_flags |= B_WRITEINPROG | B_CACHE;
611 
612 	bp->b_vp->v_numoutput++;
613 	vfs_busy_pages(bp, 1);
614 	if (curproc != NULL)
615 		curproc->p_stats->p_ru.ru_oublock++;
616 	splx(s);
617 	if (oldflags & B_ASYNC)
618 		BUF_KERNPROC(bp);
619 	VOP_STRATEGY(bp->b_vp, bp);
620 
621 	if ((oldflags & B_ASYNC) == 0) {
622 		int rtval = biowait(bp);
623 		brelse(bp);
624 		return (rtval);
625 	}
626 
627 	return (0);
628 }
629 
630 /*
631  * Delayed write. (Buffer is marked dirty).  Do not bother writing
632  * anything if the buffer is marked invalid.
633  *
634  * Note that since the buffer must be completely valid, we can safely
635  * set B_CACHE.  In fact, we have to set B_CACHE here rather then in
636  * biodone() in order to prevent getblk from writing the buffer
637  * out synchronously.
638  */
639 void
640 bdwrite(struct buf * bp)
641 {
642 #if !defined(MAX_PERF)
643 	if (BUF_REFCNT(bp) == 0)
644 		panic("bdwrite: buffer is not busy");
645 #endif
646 
647 	if (bp->b_flags & B_INVAL) {
648 		brelse(bp);
649 		return;
650 	}
651 	bdirty(bp);
652 
653 	/*
654 	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
655 	 * true even of NFS now.
656 	 */
657 	bp->b_flags |= B_CACHE;
658 
659 	/*
660 	 * This bmap keeps the system from needing to do the bmap later,
661 	 * perhaps when the system is attempting to do a sync.  Since it
662 	 * is likely that the indirect block -- or whatever other datastructure
663 	 * that the filesystem needs is still in memory now, it is a good
664 	 * thing to do this.  Note also, that if the pageout daemon is
665 	 * requesting a sync -- there might not be enough memory to do
666 	 * the bmap then...  So, this is important to do.
667 	 */
668 	if (bp->b_lblkno == bp->b_blkno) {
669 		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
670 	}
671 
672 	/*
673 	 * Set the *dirty* buffer range based upon the VM system dirty pages.
674 	 */
675 	vfs_setdirty(bp);
676 
677 	/*
678 	 * We need to do this here to satisfy the vnode_pager and the
679 	 * pageout daemon, so that it thinks that the pages have been
680 	 * "cleaned".  Note that since the pages are in a delayed write
681 	 * buffer -- the VFS layer "will" see that the pages get written
682 	 * out on the next sync, or perhaps the cluster will be completed.
683 	 */
684 	vfs_clean_pages(bp);
685 	bqrelse(bp);
686 
687 	/*
688 	 * Wakeup the buffer flushing daemon if we have saturated the
689 	 * buffer cache.
690 	 */
691 
692 	bd_wakeup(hidirtybuffers);
693 
694 	/*
695 	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
696 	 * due to the softdep code.
697 	 */
698 }
699 
700 /*
701  *	bdirty:
702  *
703  *	Turn buffer into delayed write request.  We must clear B_READ and
704  *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
705  *	itself to properly update it in the dirty/clean lists.  We mark it
706  *	B_DONE to ensure that any asynchronization of the buffer properly
707  *	clears B_DONE ( else a panic will occur later ).
708  *
709  *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
710  *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
711  *	should only be called if the buffer is known-good.
712  *
713  *	Since the buffer is not on a queue, we do not update the numfreebuffers
714  *	count.
715  *
716  *	Must be called at splbio().
717  *	The buffer must be on QUEUE_NONE.
718  */
719 void
720 bdirty(bp)
721 	struct buf *bp;
722 {
723 	KASSERT(bp->b_qindex == QUEUE_NONE, ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
724 	bp->b_flags &= ~(B_READ|B_RELBUF);
725 
726 	if ((bp->b_flags & B_DELWRI) == 0) {
727 		bp->b_flags |= B_DONE | B_DELWRI;
728 		reassignbuf(bp, bp->b_vp);
729 		++numdirtybuffers;
730 		bd_wakeup(hidirtybuffers);
731 	}
732 }
733 
734 /*
735  *	bundirty:
736  *
737  *	Clear B_DELWRI for buffer.
738  *
739  *	Since the buffer is not on a queue, we do not update the numfreebuffers
740  *	count.
741  *
742  *	Must be called at splbio().
743  *	The buffer must be on QUEUE_NONE.
744  */
745 
746 void
747 bundirty(bp)
748 	struct buf *bp;
749 {
750 	KASSERT(bp->b_qindex == QUEUE_NONE, ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
751 
752 	if (bp->b_flags & B_DELWRI) {
753 		bp->b_flags &= ~B_DELWRI;
754 		reassignbuf(bp, bp->b_vp);
755 		--numdirtybuffers;
756 		numdirtywakeup();
757 	}
758 }
759 
760 /*
761  *	bawrite:
762  *
763  *	Asynchronous write.  Start output on a buffer, but do not wait for
764  *	it to complete.  The buffer is released when the output completes.
765  *
766  *	bwrite() ( or the VOP routine anyway ) is responsible for handling
767  *	B_INVAL buffers.  Not us.
768  */
769 void
770 bawrite(struct buf * bp)
771 {
772 	bp->b_flags |= B_ASYNC;
773 	(void) VOP_BWRITE(bp->b_vp, bp);
774 }
775 
776 /*
777  *	bowrite:
778  *
779  *	Ordered write.  Start output on a buffer, and flag it so that the
780  *	device will write it in the order it was queued.  The buffer is
781  *	released when the output completes.  bwrite() ( or the VOP routine
782  *	anyway ) is responsible for handling B_INVAL buffers.
783  */
784 int
785 bowrite(struct buf * bp)
786 {
787 	bp->b_flags |= B_ORDERED | B_ASYNC;
788 	return (VOP_BWRITE(bp->b_vp, bp));
789 }
790 
791 /*
792  *	bwillwrite:
793  *
794  *	Called prior to the locking of any vnodes when we are expecting to
795  *	write.  We do not want to starve the buffer cache with too many
796  *	dirty buffers so we block here.  By blocking prior to the locking
797  *	of any vnodes we attempt to avoid the situation where a locked vnode
798  *	prevents the various system daemons from flushing related buffers.
799  */
800 
801 void
802 bwillwrite(void)
803 {
804 	int slop = hidirtybuffers / 10;
805 
806 	if (numdirtybuffers > hidirtybuffers + slop) {
807 		int s;
808 
809 		s = splbio();
810 		while (numdirtybuffers > hidirtybuffers) {
811 			bd_wakeup(hidirtybuffers);
812 			needsbuffer |= VFS_BIO_NEED_DIRTYFLUSH;
813 			tsleep(&needsbuffer, (PRIBIO + 4), "flswai", 0);
814 		}
815 		splx(s);
816 	}
817 }
818 
819 /*
820  *	brelse:
821  *
822  *	Release a busy buffer and, if requested, free its resources.  The
823  *	buffer will be stashed in the appropriate bufqueue[] allowing it
824  *	to be accessed later as a cache entity or reused for other purposes.
825  */
826 void
827 brelse(struct buf * bp)
828 {
829 	int s;
830 	int kvawakeup = 0;
831 
832 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
833 
834 	s = splbio();
835 
836 	if (bp->b_flags & B_LOCKED)
837 		bp->b_flags &= ~B_ERROR;
838 
839 	if ((bp->b_flags & (B_READ | B_ERROR | B_INVAL)) == B_ERROR) {
840 		/*
841 		 * Failed write, redirty.  Must clear B_ERROR to prevent
842 		 * pages from being scrapped.  If B_INVAL is set then
843 		 * this case is not run and the next case is run to
844 		 * destroy the buffer.  B_INVAL can occur if the buffer
845 		 * is outside the range supported by the underlying device.
846 		 */
847 		bp->b_flags &= ~B_ERROR;
848 		bdirty(bp);
849 	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) ||
850 	    (bp->b_bufsize <= 0)) {
851 		/*
852 		 * Either a failed I/O or we were asked to free or not
853 		 * cache the buffer.
854 		 */
855 		bp->b_flags |= B_INVAL;
856 		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
857 			(*bioops.io_deallocate)(bp);
858 		if (bp->b_flags & B_DELWRI) {
859 			--numdirtybuffers;
860 			numdirtywakeup();
861 		}
862 		bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF);
863 		if ((bp->b_flags & B_VMIO) == 0) {
864 			if (bp->b_bufsize)
865 				allocbuf(bp, 0);
866 			if (bp->b_vp)
867 				brelvp(bp);
868 		}
869 	}
870 
871 	/*
872 	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
873 	 * is called with B_DELWRI set, the underlying pages may wind up
874 	 * getting freed causing a previous write (bdwrite()) to get 'lost'
875 	 * because pages associated with a B_DELWRI bp are marked clean.
876 	 *
877 	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
878 	 * if B_DELWRI is set.
879 	 */
880 
881 	if (bp->b_flags & B_DELWRI)
882 		bp->b_flags &= ~B_RELBUF;
883 
884 	/*
885 	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
886 	 * constituted, not even NFS buffers now.  Two flags effect this.  If
887 	 * B_INVAL, the struct buf is invalidated but the VM object is kept
888 	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
889 	 *
890 	 * If B_ERROR or B_NOCACHE is set, pages in the VM object will be
891 	 * invalidated.  B_ERROR cannot be set for a failed write unless the
892 	 * buffer is also B_INVAL because it hits the re-dirtying code above.
893 	 *
894 	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
895 	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
896 	 * the commit state and we cannot afford to lose the buffer.
897 	 */
898 	if ((bp->b_flags & B_VMIO)
899 	    && !(bp->b_vp->v_tag == VT_NFS &&
900 		 !vn_isdisk(bp->b_vp) &&
901 		 (bp->b_flags & B_DELWRI))
902 	    ) {
903 
904 		int i, j, resid;
905 		vm_page_t m;
906 		off_t foff;
907 		vm_pindex_t poff;
908 		vm_object_t obj;
909 		struct vnode *vp;
910 
911 		vp = bp->b_vp;
912 
913 		/*
914 		 * Get the base offset and length of the buffer.  Note that
915 		 * for block sizes that are less then PAGE_SIZE, the b_data
916 		 * base of the buffer does not represent exactly b_offset and
917 		 * neither b_offset nor b_size are necessarily page aligned.
918 		 * Instead, the starting position of b_offset is:
919 		 *
920 		 * 	b_data + (b_offset & PAGE_MASK)
921 		 *
922 		 * block sizes less then DEV_BSIZE (usually 512) are not
923 		 * supported due to the page granularity bits (m->valid,
924 		 * m->dirty, etc...).
925 		 *
926 		 * See man buf(9) for more information
927 		 */
928 
929 		resid = bp->b_bufsize;
930 		foff = bp->b_offset;
931 
932 		for (i = 0; i < bp->b_npages; i++) {
933 			m = bp->b_pages[i];
934 			vm_page_flag_clear(m, PG_ZERO);
935 			if (m == bogus_page) {
936 
937 				obj = (vm_object_t) vp->v_object;
938 				poff = OFF_TO_IDX(bp->b_offset);
939 
940 				for (j = i; j < bp->b_npages; j++) {
941 					m = bp->b_pages[j];
942 					if (m == bogus_page) {
943 						m = vm_page_lookup(obj, poff + j);
944 #if !defined(MAX_PERF)
945 						if (!m) {
946 							panic("brelse: page missing\n");
947 						}
948 #endif
949 						bp->b_pages[j] = m;
950 					}
951 				}
952 
953 				if ((bp->b_flags & B_INVAL) == 0) {
954 					pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
955 				}
956 			}
957 			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
958 				int poffset = foff & PAGE_MASK;
959 				int presid = resid > (PAGE_SIZE - poffset) ?
960 					(PAGE_SIZE - poffset) : resid;
961 
962 				KASSERT(presid >= 0, ("brelse: extra page"));
963 				vm_page_set_invalid(m, poffset, presid);
964 			}
965 			resid -= PAGE_SIZE - (foff & PAGE_MASK);
966 			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
967 		}
968 
969 		if (bp->b_flags & (B_INVAL | B_RELBUF))
970 			vfs_vmio_release(bp);
971 
972 	} else if (bp->b_flags & B_VMIO) {
973 
974 		if (bp->b_flags & (B_INVAL | B_RELBUF))
975 			vfs_vmio_release(bp);
976 
977 	}
978 
979 #if !defined(MAX_PERF)
980 	if (bp->b_qindex != QUEUE_NONE)
981 		panic("brelse: free buffer onto another queue???");
982 #endif
983 	if (BUF_REFCNT(bp) > 1) {
984 		/* Temporary panic to verify exclusive locking */
985 		/* This panic goes away when we allow shared refs */
986 		panic("brelse: multiple refs");
987 		/* do not release to free list */
988 		BUF_UNLOCK(bp);
989 		splx(s);
990 		return;
991 	}
992 
993 	/* enqueue */
994 
995 	/* buffers with no memory */
996 	if (bp->b_bufsize == 0) {
997 		bp->b_flags |= B_INVAL;
998 		if (bp->b_kvasize) {
999 			bp->b_qindex = QUEUE_EMPTYKVA;
1000 			kvawakeup = 1;
1001 		} else {
1002 			bp->b_qindex = QUEUE_EMPTY;
1003 		}
1004 		TAILQ_INSERT_HEAD(&bufqueues[bp->b_qindex], bp, b_freelist);
1005 		LIST_REMOVE(bp, b_hash);
1006 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1007 		bp->b_dev = NODEV;
1008 		kvafreespace += bp->b_kvasize;
1009 	/* buffers with junk contents */
1010 	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
1011 		bp->b_flags |= B_INVAL;
1012 		bp->b_qindex = QUEUE_CLEAN;
1013 		if (bp->b_kvasize)
1014 			kvawakeup = 1;
1015 		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
1016 		LIST_REMOVE(bp, b_hash);
1017 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1018 		bp->b_dev = NODEV;
1019 
1020 	/* buffers that are locked */
1021 	} else if (bp->b_flags & B_LOCKED) {
1022 		bp->b_qindex = QUEUE_LOCKED;
1023 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
1024 
1025 	/* remaining buffers */
1026 	} else {
1027 		switch(bp->b_flags & (B_DELWRI|B_AGE)) {
1028 		case B_DELWRI | B_AGE:
1029 		    bp->b_qindex = QUEUE_DIRTY;
1030 		    TAILQ_INSERT_HEAD(&bufqueues[QUEUE_DIRTY], bp, b_freelist);
1031 		    break;
1032 		case B_DELWRI:
1033 		    bp->b_qindex = QUEUE_DIRTY;
1034 		    TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY], bp, b_freelist);
1035 		    break;
1036 		case B_AGE:
1037 		    bp->b_qindex = QUEUE_CLEAN;
1038 		    TAILQ_INSERT_HEAD(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
1039 		    if (bp->b_kvasize)
1040 			    kvawakeup = 1;
1041 		    break;
1042 		default:
1043 		    bp->b_qindex = QUEUE_CLEAN;
1044 		    TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
1045 		    if (bp->b_kvasize)
1046 			    kvawakeup = 1;
1047 		    break;
1048 		}
1049 	}
1050 
1051 	/*
1052 	 * If B_INVAL, clear B_DELWRI.  We've already placed the buffer
1053 	 * on the correct queue.
1054 	 */
1055 	if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI)) {
1056 		bp->b_flags &= ~B_DELWRI;
1057 		--numdirtybuffers;
1058 		numdirtywakeup();
1059 	}
1060 
1061 	runningbufspace -= bp->b_bufsize;
1062 
1063 	/*
1064 	 * Fixup numfreebuffers count.  The bp is on an appropriate queue
1065 	 * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
1066 	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
1067 	 * if B_INVAL is set ).
1068 	 */
1069 
1070 	if ((bp->b_flags & B_LOCKED) == 0 && !(bp->b_flags & B_DELWRI))
1071 		bufcountwakeup();
1072 
1073 	/*
1074 	 * Something we can maybe free.
1075 	 */
1076 
1077 	if (bp->b_bufsize)
1078 		bufspacewakeup();
1079 	if (kvawakeup)
1080 		kvaspacewakeup();
1081 
1082 	/* unlock */
1083 	BUF_UNLOCK(bp);
1084 	bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
1085 	splx(s);
1086 }
1087 
1088 /*
1089  * Release a buffer back to the appropriate queue but do not try to free
1090  * it.
1091  *
1092  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
1093  * biodone() to requeue an async I/O on completion.  It is also used when
1094  * known good buffers need to be requeued but we think we may need the data
1095  * again soon.
1096  */
1097 void
1098 bqrelse(struct buf * bp)
1099 {
1100 	int s;
1101 
1102 	s = splbio();
1103 
1104 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
1105 
1106 #if !defined(MAX_PERF)
1107 	if (bp->b_qindex != QUEUE_NONE)
1108 		panic("bqrelse: free buffer onto another queue???");
1109 #endif
1110 	if (BUF_REFCNT(bp) > 1) {
1111 		/* do not release to free list */
1112 		panic("bqrelse: multiple refs");
1113 		BUF_UNLOCK(bp);
1114 		splx(s);
1115 		return;
1116 	}
1117 	if (bp->b_flags & B_LOCKED) {
1118 		bp->b_flags &= ~B_ERROR;
1119 		bp->b_qindex = QUEUE_LOCKED;
1120 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
1121 		/* buffers with stale but valid contents */
1122 	} else if (bp->b_flags & B_DELWRI) {
1123 		bp->b_qindex = QUEUE_DIRTY;
1124 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_DIRTY], bp, b_freelist);
1125 	} else {
1126 		bp->b_qindex = QUEUE_CLEAN;
1127 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_CLEAN], bp, b_freelist);
1128 	}
1129 
1130 	runningbufspace -= bp->b_bufsize;
1131 
1132 	if ((bp->b_flags & B_LOCKED) == 0 &&
1133 	    ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))) {
1134 		bufcountwakeup();
1135 	}
1136 
1137 	/*
1138 	 * Something we can maybe wakeup
1139 	 */
1140 	if (bp->b_bufsize && !(bp->b_flags & B_DELWRI))
1141 		bufspacewakeup();
1142 
1143 	/* unlock */
1144 	BUF_UNLOCK(bp);
1145 	bp->b_flags &= ~(B_ORDERED | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
1146 	splx(s);
1147 }
1148 
1149 static void
1150 vfs_vmio_release(bp)
1151 	struct buf *bp;
1152 {
1153 	int i, s;
1154 	vm_page_t m;
1155 
1156 	s = splvm();
1157 	for (i = 0; i < bp->b_npages; i++) {
1158 		m = bp->b_pages[i];
1159 		bp->b_pages[i] = NULL;
1160 		/*
1161 		 * In order to keep page LRU ordering consistent, put
1162 		 * everything on the inactive queue.
1163 		 */
1164 		vm_page_unwire(m, 0);
1165 		/*
1166 		 * We don't mess with busy pages, it is
1167 		 * the responsibility of the process that
1168 		 * busied the pages to deal with them.
1169 		 */
1170 		if ((m->flags & PG_BUSY) || (m->busy != 0))
1171 			continue;
1172 
1173 		if (m->wire_count == 0) {
1174 			vm_page_flag_clear(m, PG_ZERO);
1175 			/*
1176 			 * Might as well free the page if we can and it has
1177 			 * no valid data.
1178 			 */
1179 			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid && m->hold_count == 0) {
1180 				vm_page_busy(m);
1181 				vm_page_protect(m, VM_PROT_NONE);
1182 				vm_page_free(m);
1183 			}
1184 		}
1185 	}
1186 	bufspace -= bp->b_bufsize;
1187 	vmiospace -= bp->b_bufsize;
1188 	runningbufspace -= bp->b_bufsize;
1189 	splx(s);
1190 	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
1191 	if (bp->b_bufsize)
1192 		bufspacewakeup();
1193 	bp->b_npages = 0;
1194 	bp->b_bufsize = 0;
1195 	bp->b_flags &= ~B_VMIO;
1196 	if (bp->b_vp)
1197 		brelvp(bp);
1198 }
1199 
1200 /*
1201  * Check to see if a block is currently memory resident.
1202  */
1203 struct buf *
1204 gbincore(struct vnode * vp, daddr_t blkno)
1205 {
1206 	struct buf *bp;
1207 	struct bufhashhdr *bh;
1208 
1209 	bh = bufhash(vp, blkno);
1210 
1211 	/* Search hash chain */
1212 	LIST_FOREACH(bp, bh, b_hash) {
1213 		/* hit */
1214 		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
1215 		    (bp->b_flags & B_INVAL) == 0) {
1216 			break;
1217 		}
1218 	}
1219 	return (bp);
1220 }
1221 
1222 /*
1223  *	vfs_bio_awrite:
1224  *
1225  *	Implement clustered async writes for clearing out B_DELWRI buffers.
1226  *	This is much better then the old way of writing only one buffer at
1227  *	a time.  Note that we may not be presented with the buffers in the
1228  *	correct order, so we search for the cluster in both directions.
1229  */
1230 int
1231 vfs_bio_awrite(struct buf * bp)
1232 {
1233 	int i;
1234 	int j;
1235 	daddr_t lblkno = bp->b_lblkno;
1236 	struct vnode *vp = bp->b_vp;
1237 	int s;
1238 	int ncl;
1239 	struct buf *bpa;
1240 	int nwritten;
1241 	int size;
1242 	int maxcl;
1243 
1244 	s = splbio();
1245 	/*
1246 	 * right now we support clustered writing only to regular files.  If
1247 	 * we find a clusterable block we could be in the middle of a cluster
1248 	 * rather then at the beginning.
1249 	 */
1250 	if ((vp->v_type == VREG) &&
1251 	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
1252 	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
1253 
1254 		size = vp->v_mount->mnt_stat.f_iosize;
1255 		maxcl = MAXPHYS / size;
1256 
1257 		for (i = 1; i < maxcl; i++) {
1258 			if ((bpa = gbincore(vp, lblkno + i)) &&
1259 			    BUF_REFCNT(bpa) == 0 &&
1260 			    ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
1261 			    (B_DELWRI | B_CLUSTEROK)) &&
1262 			    (bpa->b_bufsize == size)) {
1263 				if ((bpa->b_blkno == bpa->b_lblkno) ||
1264 				    (bpa->b_blkno !=
1265 				     bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
1266 					break;
1267 			} else {
1268 				break;
1269 			}
1270 		}
1271 		for (j = 1; i + j <= maxcl && j <= lblkno; j++) {
1272 			if ((bpa = gbincore(vp, lblkno - j)) &&
1273 			    BUF_REFCNT(bpa) == 0 &&
1274 			    ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
1275 			    (B_DELWRI | B_CLUSTEROK)) &&
1276 			    (bpa->b_bufsize == size)) {
1277 				if ((bpa->b_blkno == bpa->b_lblkno) ||
1278 				    (bpa->b_blkno !=
1279 				     bp->b_blkno - ((j * size) >> DEV_BSHIFT)))
1280 					break;
1281 			} else {
1282 				break;
1283 			}
1284 		}
1285 		--j;
1286 		ncl = i + j;
1287 		/*
1288 		 * this is a possible cluster write
1289 		 */
1290 		if (ncl != 1) {
1291 			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
1292 			splx(s);
1293 			return nwritten;
1294 		}
1295 	}
1296 
1297 	BUF_LOCK(bp, LK_EXCLUSIVE);
1298 	bremfree(bp);
1299 	bp->b_flags |= B_ASYNC;
1300 
1301 	splx(s);
1302 	/*
1303 	 * default (old) behavior, writing out only one block
1304 	 *
1305 	 * XXX returns b_bufsize instead of b_bcount for nwritten?
1306 	 */
1307 	nwritten = bp->b_bufsize;
1308 	(void) VOP_BWRITE(bp->b_vp, bp);
1309 
1310 	return nwritten;
1311 }
1312 
1313 /*
1314  *	getnewbuf:
1315  *
1316  *	Find and initialize a new buffer header, freeing up existing buffers
1317  *	in the bufqueues as necessary.  The new buffer is returned locked.
1318  *
1319  *	Important:  B_INVAL is not set.  If the caller wishes to throw the
1320  *	buffer away, the caller must set B_INVAL prior to calling brelse().
1321  *
1322  *	We block if:
1323  *		We have insufficient buffer headers
1324  *		We have insufficient buffer space
1325  *		buffer_map is too fragmented ( space reservation fails )
1326  *		If we have to flush dirty buffers ( but we try to avoid this )
1327  *
1328  *	To avoid VFS layer recursion we do not flush dirty buffers ourselves.
1329  *	Instead we ask the buf daemon to do it for us.  We attempt to
1330  *	avoid piecemeal wakeups of the pageout daemon.
1331  */
1332 
1333 static struct buf *
1334 getnewbuf(int slpflag, int slptimeo, int size, int maxsize)
1335 {
1336 	struct buf *bp;
1337 	struct buf *nbp;
1338 	struct buf *dbp;
1339 	int outofspace;
1340 	int nqindex;
1341 	int defrag = 0;
1342 
1343 	++getnewbufcalls;
1344 	--getnewbufrestarts;
1345 restart:
1346 	++getnewbufrestarts;
1347 
1348 	/*
1349 	 * Calculate whether we are out of buffer space.  This state is
1350 	 * recalculated on every restart.  If we are out of space, we
1351 	 * have to turn off defragmentation.  Setting defrag to -1 when
1352 	 * outofspace is positive means "defrag while freeing buffers".
1353 	 * The looping conditional will be muffed up if defrag is left
1354 	 * positive when outofspace is positive.
1355 	 */
1356 
1357 	dbp = NULL;
1358 	outofspace = 0;
1359 	if (bufspace >= hibufspace) {
1360 		if ((curproc && (curproc->p_flag & P_BUFEXHAUST) == 0) ||
1361 		    bufspace >= maxbufspace) {
1362 			outofspace = 1;
1363 			if (defrag > 0)
1364 				defrag = -1;
1365 		}
1366 	}
1367 
1368 	/*
1369 	 * defrag state is semi-persistant.  1 means we are flagged for
1370 	 * defragging.  -1 means we actually defragged something.
1371 	 */
1372 	/* nop */
1373 
1374 	/*
1375 	 * Setup for scan.  If we do not have enough free buffers,
1376 	 * we setup a degenerate case that immediately fails.  Note
1377 	 * that if we are specially marked process, we are allowed to
1378 	 * dip into our reserves.
1379 	 *
1380 	 * Normally we want to find an EMPTYKVA buffer.  That is, a
1381 	 * buffer with kva already allocated.  If there are no EMPTYKVA
1382 	 * buffers we back up to the truely EMPTY buffers.  When defragging
1383 	 * we do not bother backing up since we have to locate buffers with
1384 	 * kva to defrag.  If we are out of space we skip both EMPTY and
1385 	 * EMPTYKVA and dig right into the CLEAN queue.
1386 	 *
1387 	 * In this manner we avoid scanning unnecessary buffers.  It is very
1388 	 * important for us to do this because the buffer cache is almost
1389 	 * constantly out of space or in need of defragmentation.
1390 	 */
1391 
1392 	if (curproc && (curproc->p_flag & P_BUFEXHAUST) == 0 &&
1393 	    numfreebuffers < lofreebuffers) {
1394 		nqindex = QUEUE_CLEAN;
1395 		nbp = NULL;
1396 	} else {
1397 		nqindex = QUEUE_EMPTYKVA;
1398 		nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA]);
1399 		if (nbp == NULL) {
1400 			if (defrag <= 0) {
1401 				nqindex = QUEUE_EMPTY;
1402 				nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
1403 			}
1404 		}
1405 		if (outofspace || nbp == NULL) {
1406 			nqindex = QUEUE_CLEAN;
1407 			nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN]);
1408 		}
1409 	}
1410 
1411 	/*
1412 	 * Run scan, possibly freeing data and/or kva mappings on the fly
1413 	 * depending.
1414 	 */
1415 
1416 	while ((bp = nbp) != NULL) {
1417 		int qindex = nqindex;
1418 
1419 		/*
1420 		 * Calculate next bp ( we can only use it if we do not block
1421 		 * or do other fancy things ).
1422 		 */
1423 		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
1424 			switch(qindex) {
1425 			case QUEUE_EMPTY:
1426 				nqindex = QUEUE_EMPTYKVA;
1427 				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTYKVA])))
1428 					break;
1429 				/* fall through */
1430 			case QUEUE_EMPTYKVA:
1431 				nqindex = QUEUE_CLEAN;
1432 				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_CLEAN])))
1433 					break;
1434 				/* fall through */
1435 			case QUEUE_CLEAN:
1436 				/*
1437 				 * nbp is NULL.
1438 				 */
1439 				break;
1440 			}
1441 		}
1442 
1443 		/*
1444 		 * Sanity Checks
1445 		 */
1446 		KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistant queue %d bp %p", qindex, bp));
1447 
1448 		/*
1449 		 * Note: we no longer distinguish between VMIO and non-VMIO
1450 		 * buffers.
1451 		 */
1452 
1453 		KASSERT((bp->b_flags & B_DELWRI) == 0, ("delwri buffer %p found in queue %d", bp, qindex));
1454 
1455 		/*
1456 		 * If we are defragging and the buffer isn't useful for fixing
1457 		 * that problem we continue.  If we are out of space and the
1458 		 * buffer isn't useful for fixing that problem we continue.
1459 		 */
1460 
1461 		if (defrag > 0 && bp->b_kvasize == 0)
1462 			continue;
1463 		if (outofspace > 0 && bp->b_bufsize == 0)
1464 			continue;
1465 
1466 		/*
1467 		 * Start freeing the bp.  This is somewhat involved.  nbp
1468 		 * remains valid only for QUEUE_EMPTY[KVA] bp's.
1469 		 */
1470 
1471 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
1472 			panic("getnewbuf: locked buf");
1473 		bremfree(bp);
1474 
1475 		if (qindex == QUEUE_CLEAN) {
1476 			if (bp->b_flags & B_VMIO) {
1477 				bp->b_flags &= ~B_ASYNC;
1478 				vfs_vmio_release(bp);
1479 			}
1480 			if (bp->b_vp)
1481 				brelvp(bp);
1482 		}
1483 
1484 		/*
1485 		 * NOTE:  nbp is now entirely invalid.  We can only restart
1486 		 * the scan from this point on.
1487 		 *
1488 		 * Get the rest of the buffer freed up.  b_kva* is still
1489 		 * valid after this operation.
1490 		 */
1491 
1492 		if (bp->b_rcred != NOCRED) {
1493 			crfree(bp->b_rcred);
1494 			bp->b_rcred = NOCRED;
1495 		}
1496 		if (bp->b_wcred != NOCRED) {
1497 			crfree(bp->b_wcred);
1498 			bp->b_wcred = NOCRED;
1499 		}
1500 		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
1501 			(*bioops.io_deallocate)(bp);
1502 		LIST_REMOVE(bp, b_hash);
1503 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1504 
1505 		if (bp->b_bufsize)
1506 			allocbuf(bp, 0);
1507 
1508 		bp->b_flags = 0;
1509 		bp->b_dev = NODEV;
1510 		bp->b_vp = NULL;
1511 		bp->b_blkno = bp->b_lblkno = 0;
1512 		bp->b_offset = NOOFFSET;
1513 		bp->b_iodone = 0;
1514 		bp->b_error = 0;
1515 		bp->b_resid = 0;
1516 		bp->b_bcount = 0;
1517 		bp->b_npages = 0;
1518 		bp->b_dirtyoff = bp->b_dirtyend = 0;
1519 
1520 		LIST_INIT(&bp->b_dep);
1521 
1522 		/*
1523 		 * Ok, now that we have a free buffer, if we are defragging
1524 		 * we have to recover the kvaspace.  If we are out of space
1525 		 * we have to free the buffer (which we just did), but we
1526 		 * do not have to recover kva space unless we hit a defrag
1527 		 * hicup.  Being able to avoid freeing the kva space leads
1528 		 * to a significant reduction in overhead.
1529 		 */
1530 
1531 		if (defrag > 0) {
1532 			defrag = -1;
1533 			bp->b_flags |= B_INVAL;
1534 			bfreekva(bp);
1535 			brelse(bp);
1536 			goto restart;
1537 		}
1538 
1539 		if (outofspace > 0) {
1540 			outofspace = -1;
1541 			bp->b_flags |= B_INVAL;
1542 			if (defrag < 0)
1543 				bfreekva(bp);
1544 			brelse(bp);
1545 			goto restart;
1546 		}
1547 
1548 		/*
1549 		 * We are done
1550 		 */
1551 		break;
1552 	}
1553 
1554 	/*
1555 	 * If we exhausted our list, sleep as appropriate.  We may have to
1556 	 * wakeup various daemons and write out some dirty buffers.
1557 	 *
1558 	 * Generally we are sleeping due to insufficient buffer space.
1559 	 */
1560 
1561 	if (bp == NULL) {
1562 		int flags;
1563 		char *waitmsg;
1564 
1565 		if (defrag > 0) {
1566 			flags = VFS_BIO_NEED_KVASPACE;
1567 			waitmsg = "nbufkv";
1568 		} else if (outofspace > 0) {
1569 			waitmsg = "nbufbs";
1570 			flags = VFS_BIO_NEED_BUFSPACE;
1571 		} else {
1572 			waitmsg = "newbuf";
1573 			flags = VFS_BIO_NEED_ANY;
1574 		}
1575 
1576 		bd_speedup();	/* heeeelp */
1577 
1578 		needsbuffer |= flags;
1579 		while (needsbuffer & flags) {
1580 			if (tsleep(&needsbuffer, (PRIBIO + 4) | slpflag,
1581 			    waitmsg, slptimeo))
1582 				return (NULL);
1583 		}
1584 	} else {
1585 		/*
1586 		 * We finally have a valid bp.  We aren't quite out of the
1587 		 * woods, we still have to reserve kva space.
1588 		 */
1589 		vm_offset_t addr = 0;
1590 
1591 		maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;
1592 
1593 		if (maxsize != bp->b_kvasize) {
1594 			bfreekva(bp);
1595 
1596 			if (vm_map_findspace(buffer_map,
1597 				vm_map_min(buffer_map), maxsize, &addr)) {
1598 				/*
1599 				 * Uh oh.  Buffer map is to fragmented.  Try
1600 				 * to defragment.
1601 				 */
1602 				if (defrag <= 0) {
1603 					defrag = 1;
1604 					bp->b_flags |= B_INVAL;
1605 					brelse(bp);
1606 					goto restart;
1607 				}
1608 				/*
1609 				 * Uh oh.  We couldn't seem to defragment
1610 				 */
1611 				panic("getnewbuf: unreachable code reached");
1612 			}
1613 		}
1614 		if (addr) {
1615 			vm_map_insert(buffer_map, NULL, 0,
1616 				addr, addr + maxsize,
1617 				VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
1618 
1619 			bp->b_kvabase = (caddr_t) addr;
1620 			bp->b_kvasize = maxsize;
1621 		}
1622 		bp->b_data = bp->b_kvabase;
1623 	}
1624 	return(bp);
1625 }
1626 
1627 /*
1628  *	waitfreebuffers:
1629  *
1630  *	Wait for sufficient free buffers.  Only called from normal processes.
1631  */
1632 
1633 static void
1634 waitfreebuffers(int slpflag, int slptimeo)
1635 {
1636 	while (numfreebuffers < hifreebuffers) {
1637 		if (numfreebuffers >= hifreebuffers)
1638 			break;
1639 		needsbuffer |= VFS_BIO_NEED_FREE;
1640 		if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo))
1641 			break;
1642 	}
1643 }
1644 
1645 /*
1646  *	buf_daemon:
1647  *
1648  *	buffer flushing daemon.  Buffers are normally flushed by the
1649  *	update daemon but if it cannot keep up this process starts to
1650  *	take the load in an attempt to prevent getnewbuf() from blocking.
1651  */
1652 
1653 static struct proc *bufdaemonproc;
1654 static int bd_interval;
1655 static int bd_flushto;
1656 static int bd_flushinc;
1657 
1658 static struct kproc_desc buf_kp = {
1659 	"bufdaemon",
1660 	buf_daemon,
1661 	&bufdaemonproc
1662 };
1663 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp)
1664 
1665 static void
1666 buf_daemon()
1667 {
1668 	int s;
1669 	/*
1670 	 * This process is allowed to take the buffer cache to the limit
1671 	 */
1672 	curproc->p_flag |= P_BUFEXHAUST;
1673 	s = splbio();
1674 
1675 	bd_interval = 5 * hz;	/* dynamically adjusted */
1676 	bd_flushto = hidirtybuffers;	/* dynamically adjusted */
1677 	bd_flushinc = 1;
1678 
1679 	while (TRUE) {
1680 		bd_request = 0;
1681 
1682 		/*
1683 		 * Do the flush.  Limit the number of buffers we flush in one
1684 		 * go.  The failure condition occurs when processes are writing
1685 		 * buffers faster then we can dispose of them.  In this case
1686 		 * we may be flushing so often that the previous set of flushes
1687 		 * have not had time to complete, causing us to run out of
1688 		 * physical buffers and block.
1689 		 */
1690 		{
1691 			int runcount = maxbdrun;
1692 
1693 			while (numdirtybuffers > bd_flushto && runcount) {
1694 				--runcount;
1695 				if (flushbufqueues() == 0)
1696 					break;
1697 			}
1698 		}
1699 
1700 		if (bd_request ||
1701 		    tsleep(&bd_request, PVM, "psleep", bd_interval) == 0) {
1702 			/*
1703 			 * Another request is pending or we were woken up
1704 			 * without timing out.  Flush more.
1705 			 */
1706 			--bd_flushto;
1707 			if (bd_flushto >= numdirtybuffers - 5) {
1708 				bd_flushto = numdirtybuffers - 10;
1709 				bd_flushinc = 1;
1710 			}
1711 			if (bd_flushto < 2)
1712 				bd_flushto = 2;
1713 		} else {
1714 			/*
1715 			 * We slept and timed out, we can slow down.
1716 			 */
1717 			bd_flushto += bd_flushinc;
1718 			if (bd_flushto > hidirtybuffers)
1719 				bd_flushto = hidirtybuffers;
1720 			++bd_flushinc;
1721 			if (bd_flushinc > hidirtybuffers / 20 + 1)
1722 				bd_flushinc = hidirtybuffers / 20 + 1;
1723 		}
1724 
1725 		/*
1726 		 * Set the interval on a linear scale based on hidirtybuffers
1727 		 * with a maximum frequency of 1/10 second.
1728 		 */
1729 		bd_interval = bd_flushto * 5 * hz / hidirtybuffers;
1730 		if (bd_interval < hz / 10)
1731 			bd_interval = hz / 10;
1732 	}
1733 }
1734 
1735 /*
1736  *	flushbufqueues:
1737  *
1738  *	Try to flush a buffer in the dirty queue.  We must be careful to
1739  *	free up B_INVAL buffers instead of write them, which NFS is
1740  *	particularly sensitive to.
1741  */
1742 
1743 static int
1744 flushbufqueues(void)
1745 {
1746 	struct buf *bp;
1747 	int r = 0;
1748 
1749 	bp = TAILQ_FIRST(&bufqueues[QUEUE_DIRTY]);
1750 
1751 	while (bp) {
1752 		KASSERT((bp->b_flags & B_DELWRI), ("unexpected clean buffer %p", bp));
1753 		if ((bp->b_flags & B_DELWRI) != 0) {
1754 			if (bp->b_flags & B_INVAL) {
1755 				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
1756 					panic("flushbufqueues: locked buf");
1757 				bremfree(bp);
1758 				brelse(bp);
1759 				++r;
1760 				break;
1761 			}
1762 			vfs_bio_awrite(bp);
1763 			++r;
1764 			break;
1765 		}
1766 		bp = TAILQ_NEXT(bp, b_freelist);
1767 	}
1768 	return(r);
1769 }
1770 
1771 /*
1772  * Check to see if a block is currently memory resident.
1773  */
1774 struct buf *
1775 incore(struct vnode * vp, daddr_t blkno)
1776 {
1777 	struct buf *bp;
1778 
1779 	int s = splbio();
1780 	bp = gbincore(vp, blkno);
1781 	splx(s);
1782 	return (bp);
1783 }
1784 
1785 /*
1786  * Returns true if no I/O is needed to access the
1787  * associated VM object.  This is like incore except
1788  * it also hunts around in the VM system for the data.
1789  */
1790 
1791 int
1792 inmem(struct vnode * vp, daddr_t blkno)
1793 {
1794 	vm_object_t obj;
1795 	vm_offset_t toff, tinc, size;
1796 	vm_page_t m;
1797 	vm_ooffset_t off;
1798 
1799 	if (incore(vp, blkno))
1800 		return 1;
1801 	if (vp->v_mount == NULL)
1802 		return 0;
1803 	if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0)
1804 		return 0;
1805 
1806 	obj = vp->v_object;
1807 	size = PAGE_SIZE;
1808 	if (size > vp->v_mount->mnt_stat.f_iosize)
1809 		size = vp->v_mount->mnt_stat.f_iosize;
1810 	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
1811 
1812 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
1813 		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
1814 		if (!m)
1815 			return 0;
1816 		tinc = size;
1817 		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
1818 			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
1819 		if (vm_page_is_valid(m,
1820 		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
1821 			return 0;
1822 	}
1823 	return 1;
1824 }
1825 
1826 /*
1827  *	vfs_setdirty:
1828  *
1829  *	Sets the dirty range for a buffer based on the status of the dirty
1830  *	bits in the pages comprising the buffer.
1831  *
1832  *	The range is limited to the size of the buffer.
1833  *
1834  *	This routine is primarily used by NFS, but is generalized for the
1835  *	B_VMIO case.
1836  */
1837 static void
1838 vfs_setdirty(struct buf *bp)
1839 {
1840 	int i;
1841 	vm_object_t object;
1842 
1843 	/*
1844 	 * Degenerate case - empty buffer
1845 	 */
1846 
1847 	if (bp->b_bufsize == 0)
1848 		return;
1849 
1850 	/*
1851 	 * We qualify the scan for modified pages on whether the
1852 	 * object has been flushed yet.  The OBJ_WRITEABLE flag
1853 	 * is not cleared simply by protecting pages off.
1854 	 */
1855 
1856 	if ((bp->b_flags & B_VMIO) == 0)
1857 		return;
1858 
1859 	object = bp->b_pages[0]->object;
1860 
1861 	if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
1862 		printf("Warning: object %p writeable but not mightbedirty\n", object);
1863 	if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
1864 		printf("Warning: object %p mightbedirty but not writeable\n", object);
1865 
1866 	if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
1867 		vm_offset_t boffset;
1868 		vm_offset_t eoffset;
1869 
1870 		/*
1871 		 * test the pages to see if they have been modified directly
1872 		 * by users through the VM system.
1873 		 */
1874 		for (i = 0; i < bp->b_npages; i++) {
1875 			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
1876 			vm_page_test_dirty(bp->b_pages[i]);
1877 		}
1878 
1879 		/*
1880 		 * Calculate the encompassing dirty range, boffset and eoffset,
1881 		 * (eoffset - boffset) bytes.
1882 		 */
1883 
1884 		for (i = 0; i < bp->b_npages; i++) {
1885 			if (bp->b_pages[i]->dirty)
1886 				break;
1887 		}
1888 		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
1889 
1890 		for (i = bp->b_npages - 1; i >= 0; --i) {
1891 			if (bp->b_pages[i]->dirty) {
1892 				break;
1893 			}
1894 		}
1895 		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
1896 
1897 		/*
1898 		 * Fit it to the buffer.
1899 		 */
1900 
1901 		if (eoffset > bp->b_bcount)
1902 			eoffset = bp->b_bcount;
1903 
1904 		/*
1905 		 * If we have a good dirty range, merge with the existing
1906 		 * dirty range.
1907 		 */
1908 
1909 		if (boffset < eoffset) {
1910 			if (bp->b_dirtyoff > boffset)
1911 				bp->b_dirtyoff = boffset;
1912 			if (bp->b_dirtyend < eoffset)
1913 				bp->b_dirtyend = eoffset;
1914 		}
1915 	}
1916 }
1917 
1918 /*
1919  *	getblk:
1920  *
1921  *	Get a block given a specified block and offset into a file/device.
1922  *	The buffers B_DONE bit will be cleared on return, making it almost
1923  * 	ready for an I/O initiation.  B_INVAL may or may not be set on
1924  *	return.  The caller should clear B_INVAL prior to initiating a
1925  *	READ.
1926  *
1927  *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
1928  *	an existing buffer.
1929  *
1930  *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
1931  *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
1932  *	and then cleared based on the backing VM.  If the previous buffer is
1933  *	non-0-sized but invalid, B_CACHE will be cleared.
1934  *
1935  *	If getblk() must create a new buffer, the new buffer is returned with
1936  *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
1937  *	case it is returned with B_INVAL clear and B_CACHE set based on the
1938  *	backing VM.
1939  *
1940  *	getblk() also forces a VOP_BWRITE() for any B_DELWRI buffer whos
1941  *	B_CACHE bit is clear.
1942  *
1943  *	What this means, basically, is that the caller should use B_CACHE to
1944  *	determine whether the buffer is fully valid or not and should clear
1945  *	B_INVAL prior to issuing a read.  If the caller intends to validate
1946  *	the buffer by loading its data area with something, the caller needs
1947  *	to clear B_INVAL.  If the caller does this without issuing an I/O,
1948  *	the caller should set B_CACHE ( as an optimization ), else the caller
1949  *	should issue the I/O and biodone() will set B_CACHE if the I/O was
1950  *	a write attempt or if it was a successfull read.  If the caller
1951  *	intends to issue a READ, the caller must clear B_INVAL and B_ERROR
1952  *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
1953  */
1954 struct buf *
1955 getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1956 {
1957 	struct buf *bp;
1958 	int s;
1959 	struct bufhashhdr *bh;
1960 
1961 #if !defined(MAX_PERF)
1962 	if (size > MAXBSIZE)
1963 		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
1964 #endif
1965 
1966 	s = splbio();
1967 loop:
1968 	/*
1969 	 * Block if we are low on buffers.   Certain processes are allowed
1970 	 * to completely exhaust the buffer cache.
1971          *
1972          * If this check ever becomes a bottleneck it may be better to
1973          * move it into the else, when gbincore() fails.  At the moment
1974          * it isn't a problem.
1975          */
1976 	if (!curproc || (curproc->p_flag & P_BUFEXHAUST)) {
1977 		if (numfreebuffers == 0) {
1978 			if (!curproc)
1979 				return NULL;
1980 			needsbuffer |= VFS_BIO_NEED_ANY;
1981 			tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
1982 			    slptimeo);
1983 		}
1984 	} else if (numfreebuffers < lofreebuffers) {
1985 		waitfreebuffers(slpflag, slptimeo);
1986 	}
1987 
1988 	if ((bp = gbincore(vp, blkno))) {
1989 		/*
1990 		 * Buffer is in-core.  If the buffer is not busy, it must
1991 		 * be on a queue.
1992 		 */
1993 
1994 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
1995 			if (BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL,
1996 			    "getblk", slpflag, slptimeo) == ENOLCK)
1997 				goto loop;
1998 			splx(s);
1999 			return (struct buf *) NULL;
2000 		}
2001 
2002 		/*
2003 		 * The buffer is locked.  B_CACHE is cleared if the buffer is
2004 		 * invalid.  Ohterwise, for a non-VMIO buffer, B_CACHE is set
2005 		 * and for a VMIO buffer B_CACHE is adjusted according to the
2006 		 * backing VM cache.
2007 		 */
2008 		if (bp->b_flags & B_INVAL)
2009 			bp->b_flags &= ~B_CACHE;
2010 		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
2011 			bp->b_flags |= B_CACHE;
2012 		bremfree(bp);
2013 
2014 		/*
2015 		 * check for size inconsistancies for non-VMIO case.
2016 		 */
2017 
2018 		if (bp->b_bcount != size) {
2019 			if ((bp->b_flags & B_VMIO) == 0 ||
2020 			    (size > bp->b_kvasize)) {
2021 				if (bp->b_flags & B_DELWRI) {
2022 					bp->b_flags |= B_NOCACHE;
2023 					VOP_BWRITE(bp->b_vp, bp);
2024 				} else {
2025 					if ((bp->b_flags & B_VMIO) &&
2026 					   (LIST_FIRST(&bp->b_dep) == NULL)) {
2027 						bp->b_flags |= B_RELBUF;
2028 						brelse(bp);
2029 					} else {
2030 						bp->b_flags |= B_NOCACHE;
2031 						VOP_BWRITE(bp->b_vp, bp);
2032 					}
2033 				}
2034 				goto loop;
2035 			}
2036 		}
2037 
2038 		/*
2039 		 * If the size is inconsistant in the VMIO case, we can resize
2040 		 * the buffer.  This might lead to B_CACHE getting set or
2041 		 * cleared.  If the size has not changed, B_CACHE remains
2042 		 * unchanged from its previous state.
2043 		 */
2044 
2045 		if (bp->b_bcount != size)
2046 			allocbuf(bp, size);
2047 
2048 		KASSERT(bp->b_offset != NOOFFSET,
2049 		    ("getblk: no buffer offset"));
2050 
2051 		/*
2052 		 * A buffer with B_DELWRI set and B_CACHE clear must
2053 		 * be committed before we can return the buffer in
2054 		 * order to prevent the caller from issuing a read
2055 		 * ( due to B_CACHE not being set ) and overwriting
2056 		 * it.
2057 		 *
2058 		 * Most callers, including NFS and FFS, need this to
2059 		 * operate properly either because they assume they
2060 		 * can issue a read if B_CACHE is not set, or because
2061 		 * ( for example ) an uncached B_DELWRI might loop due
2062 		 * to softupdates re-dirtying the buffer.  In the latter
2063 		 * case, B_CACHE is set after the first write completes,
2064 		 * preventing further loops.
2065 		 */
2066 
2067 		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
2068 			VOP_BWRITE(bp->b_vp, bp);
2069 			goto loop;
2070 		}
2071 
2072 		splx(s);
2073 		bp->b_flags &= ~B_DONE;
2074 	} else {
2075 		/*
2076 		 * Buffer is not in-core, create new buffer.  The buffer
2077 		 * returned by getnewbuf() is locked.  Note that the returned
2078 		 * buffer is also considered valid (not marked B_INVAL).
2079 		 */
2080 		int bsize, maxsize, vmio;
2081 		off_t offset;
2082 
2083 		if (vn_isdisk(vp))
2084 			bsize = DEV_BSIZE;
2085 		else if (vp->v_mountedhere)
2086 			bsize = vp->v_mountedhere->mnt_stat.f_iosize;
2087 		else if (vp->v_mount)
2088 			bsize = vp->v_mount->mnt_stat.f_iosize;
2089 		else
2090 			bsize = size;
2091 
2092 		offset = (off_t)blkno * bsize;
2093 		vmio = (vp->v_object != 0) && (vp->v_flag & VOBJBUF);
2094 		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
2095 		maxsize = imax(maxsize, bsize);
2096 
2097 		if ((bp = getnewbuf(slpflag, slptimeo, size, maxsize)) == NULL) {
2098 			if (slpflag || slptimeo) {
2099 				splx(s);
2100 				return NULL;
2101 			}
2102 			goto loop;
2103 		}
2104 
2105 		/*
2106 		 * This code is used to make sure that a buffer is not
2107 		 * created while the getnewbuf routine is blocked.
2108 		 * This can be a problem whether the vnode is locked or not.
2109 		 * If the buffer is created out from under us, we have to
2110 		 * throw away the one we just created.  There is now window
2111 		 * race because we are safely running at splbio() from the
2112 		 * point of the duplicate buffer creation through to here,
2113 		 * and we've locked the buffer.
2114 		 */
2115 		if (gbincore(vp, blkno)) {
2116 			bp->b_flags |= B_INVAL;
2117 			brelse(bp);
2118 			goto loop;
2119 		}
2120 
2121 		/*
2122 		 * Insert the buffer into the hash, so that it can
2123 		 * be found by incore.
2124 		 */
2125 		bp->b_blkno = bp->b_lblkno = blkno;
2126 		bp->b_offset = offset;
2127 
2128 		bgetvp(vp, bp);
2129 		LIST_REMOVE(bp, b_hash);
2130 		bh = bufhash(vp, blkno);
2131 		LIST_INSERT_HEAD(bh, bp, b_hash);
2132 
2133 		/*
2134 		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
2135 		 * buffer size starts out as 0, B_CACHE will be set by
2136 		 * allocbuf() for the VMIO case prior to it testing the
2137 		 * backing store for validity.
2138 		 */
2139 
2140 		if (vmio) {
2141 			bp->b_flags |= B_VMIO;
2142 #if defined(VFS_BIO_DEBUG)
2143 			if (vp->v_type != VREG && vp->v_type != VBLK)
2144 				printf("getblk: vmioing file type %d???\n", vp->v_type);
2145 #endif
2146 		} else {
2147 			bp->b_flags &= ~B_VMIO;
2148 		}
2149 
2150 		allocbuf(bp, size);
2151 
2152 		splx(s);
2153 		bp->b_flags &= ~B_DONE;
2154 	}
2155 	return (bp);
2156 }
2157 
2158 /*
2159  * Get an empty, disassociated buffer of given size.  The buffer is initially
2160  * set to B_INVAL.
2161  */
2162 struct buf *
2163 geteblk(int size)
2164 {
2165 	struct buf *bp;
2166 	int s;
2167 
2168 	s = splbio();
2169 	while ((bp = getnewbuf(0, 0, size, MAXBSIZE)) == 0);
2170 	splx(s);
2171 	allocbuf(bp, size);
2172 	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
2173 	return (bp);
2174 }
2175 
2176 
2177 /*
2178  * This code constitutes the buffer memory from either anonymous system
2179  * memory (in the case of non-VMIO operations) or from an associated
2180  * VM object (in the case of VMIO operations).  This code is able to
2181  * resize a buffer up or down.
2182  *
2183  * Note that this code is tricky, and has many complications to resolve
2184  * deadlock or inconsistant data situations.  Tread lightly!!!
2185  * There are B_CACHE and B_DELWRI interactions that must be dealt with by
2186  * the caller.  Calling this code willy nilly can result in the loss of data.
2187  *
2188  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
2189  * B_CACHE for the non-VMIO case.
2190  */
2191 
2192 int
2193 allocbuf(struct buf *bp, int size)
2194 {
2195 	int newbsize, mbsize;
2196 	int i;
2197 
2198 #if !defined(MAX_PERF)
2199 	if (BUF_REFCNT(bp) == 0)
2200 		panic("allocbuf: buffer not busy");
2201 
2202 	if (bp->b_kvasize < size)
2203 		panic("allocbuf: buffer too small");
2204 #endif
2205 
2206 	if ((bp->b_flags & B_VMIO) == 0) {
2207 		caddr_t origbuf;
2208 		int origbufsize;
2209 		/*
2210 		 * Just get anonymous memory from the kernel.  Don't
2211 		 * mess with B_CACHE.
2212 		 */
2213 		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2214 #if !defined(NO_B_MALLOC)
2215 		if (bp->b_flags & B_MALLOC)
2216 			newbsize = mbsize;
2217 		else
2218 #endif
2219 			newbsize = round_page(size);
2220 
2221 		if (newbsize < bp->b_bufsize) {
2222 #if !defined(NO_B_MALLOC)
2223 			/*
2224 			 * malloced buffers are not shrunk
2225 			 */
2226 			if (bp->b_flags & B_MALLOC) {
2227 				if (newbsize) {
2228 					bp->b_bcount = size;
2229 				} else {
2230 					free(bp->b_data, M_BIOBUF);
2231 					bufspace -= bp->b_bufsize;
2232 					bufmallocspace -= bp->b_bufsize;
2233 					runningbufspace -= bp->b_bufsize;
2234 					if (bp->b_bufsize)
2235 						bufspacewakeup();
2236 					bp->b_data = bp->b_kvabase;
2237 					bp->b_bufsize = 0;
2238 					bp->b_bcount = 0;
2239 					bp->b_flags &= ~B_MALLOC;
2240 				}
2241 				return 1;
2242 			}
2243 #endif
2244 			vm_hold_free_pages(
2245 			    bp,
2246 			    (vm_offset_t) bp->b_data + newbsize,
2247 			    (vm_offset_t) bp->b_data + bp->b_bufsize);
2248 		} else if (newbsize > bp->b_bufsize) {
2249 #if !defined(NO_B_MALLOC)
2250 			/*
2251 			 * We only use malloced memory on the first allocation.
2252 			 * and revert to page-allocated memory when the buffer
2253 			 * grows.
2254 			 */
2255 			if ( (bufmallocspace < maxbufmallocspace) &&
2256 				(bp->b_bufsize == 0) &&
2257 				(mbsize <= PAGE_SIZE/2)) {
2258 
2259 				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
2260 				bp->b_bufsize = mbsize;
2261 				bp->b_bcount = size;
2262 				bp->b_flags |= B_MALLOC;
2263 				bufspace += mbsize;
2264 				bufmallocspace += mbsize;
2265 				runningbufspace += bp->b_bufsize;
2266 				return 1;
2267 			}
2268 #endif
2269 			origbuf = NULL;
2270 			origbufsize = 0;
2271 #if !defined(NO_B_MALLOC)
2272 			/*
2273 			 * If the buffer is growing on its other-than-first allocation,
2274 			 * then we revert to the page-allocation scheme.
2275 			 */
2276 			if (bp->b_flags & B_MALLOC) {
2277 				origbuf = bp->b_data;
2278 				origbufsize = bp->b_bufsize;
2279 				bp->b_data = bp->b_kvabase;
2280 				bufspace -= bp->b_bufsize;
2281 				bufmallocspace -= bp->b_bufsize;
2282 				runningbufspace -= bp->b_bufsize;
2283 				if (bp->b_bufsize)
2284 					bufspacewakeup();
2285 				bp->b_bufsize = 0;
2286 				bp->b_flags &= ~B_MALLOC;
2287 				newbsize = round_page(newbsize);
2288 			}
2289 #endif
2290 			vm_hold_load_pages(
2291 			    bp,
2292 			    (vm_offset_t) bp->b_data + bp->b_bufsize,
2293 			    (vm_offset_t) bp->b_data + newbsize);
2294 #if !defined(NO_B_MALLOC)
2295 			if (origbuf) {
2296 				bcopy(origbuf, bp->b_data, origbufsize);
2297 				free(origbuf, M_BIOBUF);
2298 			}
2299 #endif
2300 		}
2301 	} else {
2302 		vm_page_t m;
2303 		int desiredpages;
2304 
2305 		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2306 		desiredpages = (size == 0) ? 0 :
2307 			num_pages((bp->b_offset & PAGE_MASK) + newbsize);
2308 
2309 #if !defined(NO_B_MALLOC)
2310 		if (bp->b_flags & B_MALLOC)
2311 			panic("allocbuf: VMIO buffer can't be malloced");
2312 #endif
2313 		/*
2314 		 * Set B_CACHE initially if buffer is 0 length or will become
2315 		 * 0-length.
2316 		 */
2317 		if (size == 0 || bp->b_bufsize == 0)
2318 			bp->b_flags |= B_CACHE;
2319 
2320 		if (newbsize < bp->b_bufsize) {
2321 			/*
2322 			 * DEV_BSIZE aligned new buffer size is less then the
2323 			 * DEV_BSIZE aligned existing buffer size.  Figure out
2324 			 * if we have to remove any pages.
2325 			 */
2326 			if (desiredpages < bp->b_npages) {
2327 				for (i = desiredpages; i < bp->b_npages; i++) {
2328 					/*
2329 					 * the page is not freed here -- it
2330 					 * is the responsibility of
2331 					 * vnode_pager_setsize
2332 					 */
2333 					m = bp->b_pages[i];
2334 					KASSERT(m != bogus_page,
2335 					    ("allocbuf: bogus page found"));
2336 					while (vm_page_sleep_busy(m, TRUE, "biodep"))
2337 						;
2338 
2339 					bp->b_pages[i] = NULL;
2340 					vm_page_unwire(m, 0);
2341 				}
2342 				pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
2343 				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
2344 				bp->b_npages = desiredpages;
2345 			}
2346 		} else if (size > bp->b_bcount) {
2347 			/*
2348 			 * We are growing the buffer, possibly in a
2349 			 * byte-granular fashion.
2350 			 */
2351 			struct vnode *vp;
2352 			vm_object_t obj;
2353 			vm_offset_t toff;
2354 			vm_offset_t tinc;
2355 
2356 			/*
2357 			 * Step 1, bring in the VM pages from the object,
2358 			 * allocating them if necessary.  We must clear
2359 			 * B_CACHE if these pages are not valid for the
2360 			 * range covered by the buffer.
2361 			 */
2362 
2363 			vp = bp->b_vp;
2364 			obj = vp->v_object;
2365 
2366 			while (bp->b_npages < desiredpages) {
2367 				vm_page_t m;
2368 				vm_pindex_t pi;
2369 
2370 				pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
2371 				if ((m = vm_page_lookup(obj, pi)) == NULL) {
2372 					m = vm_page_alloc(obj, pi, VM_ALLOC_NORMAL);
2373 					if (m == NULL) {
2374 						VM_WAIT;
2375 						vm_pageout_deficit += desiredpages - bp->b_npages;
2376 					} else {
2377 						vm_page_wire(m);
2378 						vm_page_wakeup(m);
2379 						bp->b_flags &= ~B_CACHE;
2380 						bp->b_pages[bp->b_npages] = m;
2381 						++bp->b_npages;
2382 					}
2383 					continue;
2384 				}
2385 
2386 				/*
2387 				 * We found a page.  If we have to sleep on it,
2388 				 * retry because it might have gotten freed out
2389 				 * from under us.
2390 				 *
2391 				 * We can only test PG_BUSY here.  Blocking on
2392 				 * m->busy might lead to a deadlock:
2393 				 *
2394 				 *  vm_fault->getpages->cluster_read->allocbuf
2395 				 *
2396 				 */
2397 
2398 				if (vm_page_sleep_busy(m, FALSE, "pgtblk"))
2399 					continue;
2400 
2401 				/*
2402 				 * We have a good page.  Should we wakeup the
2403 				 * page daemon?
2404 				 */
2405 				if ((curproc != pageproc) &&
2406 				    ((m->queue - m->pc) == PQ_CACHE) &&
2407 				    ((cnt.v_free_count + cnt.v_cache_count) <
2408 					(cnt.v_free_min + cnt.v_cache_min))) {
2409 					pagedaemon_wakeup();
2410 				}
2411 				vm_page_flag_clear(m, PG_ZERO);
2412 				vm_page_wire(m);
2413 				bp->b_pages[bp->b_npages] = m;
2414 				++bp->b_npages;
2415 			}
2416 
2417 			/*
2418 			 * Step 2.  We've loaded the pages into the buffer,
2419 			 * we have to figure out if we can still have B_CACHE
2420 			 * set.  Note that B_CACHE is set according to the
2421 			 * byte-granular range ( bcount and size ), new the
2422 			 * aligned range ( newbsize ).
2423 			 *
2424 			 * The VM test is against m->valid, which is DEV_BSIZE
2425 			 * aligned.  Needless to say, the validity of the data
2426 			 * needs to also be DEV_BSIZE aligned.  Note that this
2427 			 * fails with NFS if the server or some other client
2428 			 * extends the file's EOF.  If our buffer is resized,
2429 			 * B_CACHE may remain set! XXX
2430 			 */
2431 
2432 			toff = bp->b_bcount;
2433 			tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
2434 
2435 			while ((bp->b_flags & B_CACHE) && toff < size) {
2436 				vm_pindex_t pi;
2437 
2438 				if (tinc > (size - toff))
2439 					tinc = size - toff;
2440 
2441 				pi = ((bp->b_offset & PAGE_MASK) + toff) >>
2442 				    PAGE_SHIFT;
2443 
2444 				vfs_buf_test_cache(
2445 				    bp,
2446 				    bp->b_offset,
2447 				    toff,
2448 				    tinc,
2449 				    bp->b_pages[pi]
2450 				);
2451 				toff += tinc;
2452 				tinc = PAGE_SIZE;
2453 			}
2454 
2455 			/*
2456 			 * Step 3, fixup the KVM pmap.  Remember that
2457 			 * bp->b_data is relative to bp->b_offset, but
2458 			 * bp->b_offset may be offset into the first page.
2459 			 */
2460 
2461 			bp->b_data = (caddr_t)
2462 			    trunc_page((vm_offset_t)bp->b_data);
2463 			pmap_qenter(
2464 			    (vm_offset_t)bp->b_data,
2465 			    bp->b_pages,
2466 			    bp->b_npages
2467 			);
2468 			bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
2469 			    (vm_offset_t)(bp->b_offset & PAGE_MASK));
2470 		}
2471 	}
2472 	if (bp->b_flags & B_VMIO)
2473 		vmiospace += (newbsize - bp->b_bufsize);
2474 	bufspace += (newbsize - bp->b_bufsize);
2475 	runningbufspace += (newbsize - bp->b_bufsize);
2476 	if (newbsize < bp->b_bufsize)
2477 		bufspacewakeup();
2478 	bp->b_bufsize = newbsize;	/* actual buffer allocation	*/
2479 	bp->b_bcount = size;		/* requested buffer size	*/
2480 	return 1;
2481 }
2482 
2483 /*
2484  *	biowait:
2485  *
2486  *	Wait for buffer I/O completion, returning error status.  The buffer
2487  *	is left locked and B_DONE on return.  B_EINTR is converted into a EINTR
2488  *	error and cleared.
2489  */
2490 int
2491 biowait(register struct buf * bp)
2492 {
2493 	int s;
2494 
2495 	s = splbio();
2496 	while ((bp->b_flags & B_DONE) == 0) {
2497 #if defined(NO_SCHEDULE_MODS)
2498 		tsleep(bp, PRIBIO, "biowait", 0);
2499 #else
2500 		if (bp->b_flags & B_READ)
2501 			tsleep(bp, PRIBIO, "biord", 0);
2502 		else
2503 			tsleep(bp, PRIBIO, "biowr", 0);
2504 #endif
2505 	}
2506 	splx(s);
2507 	if (bp->b_flags & B_EINTR) {
2508 		bp->b_flags &= ~B_EINTR;
2509 		return (EINTR);
2510 	}
2511 	if (bp->b_flags & B_ERROR) {
2512 		return (bp->b_error ? bp->b_error : EIO);
2513 	} else {
2514 		return (0);
2515 	}
2516 }
2517 
2518 /*
2519  *	biodone:
2520  *
2521  *	Finish I/O on a buffer, optionally calling a completion function.
2522  *	This is usually called from an interrupt so process blocking is
2523  *	not allowed.
2524  *
2525  *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
2526  *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
2527  *	assuming B_INVAL is clear.
2528  *
2529  *	For the VMIO case, we set B_CACHE if the op was a read and no
2530  *	read error occured, or if the op was a write.  B_CACHE is never
2531  *	set if the buffer is invalid or otherwise uncacheable.
2532  *
2533  *	biodone does not mess with B_INVAL, allowing the I/O routine or the
2534  *	initiator to leave B_INVAL set to brelse the buffer out of existance
2535  *	in the biodone routine.
2536  */
2537 void
2538 biodone(register struct buf * bp)
2539 {
2540 	int s;
2541 
2542 	s = splbio();
2543 
2544 	KASSERT(BUF_REFCNT(bp) > 0, ("biodone: bp %p not busy %d", bp, BUF_REFCNT(bp)));
2545 	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
2546 
2547 	bp->b_flags |= B_DONE;
2548 
2549 	if (bp->b_flags & B_FREEBUF) {
2550 		brelse(bp);
2551 		splx(s);
2552 		return;
2553 	}
2554 
2555 	if ((bp->b_flags & B_READ) == 0) {
2556 		vwakeup(bp);
2557 	}
2558 
2559 	/* call optional completion function if requested */
2560 	if (bp->b_flags & B_CALL) {
2561 		bp->b_flags &= ~B_CALL;
2562 		(*bp->b_iodone) (bp);
2563 		splx(s);
2564 		return;
2565 	}
2566 	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
2567 		(*bioops.io_complete)(bp);
2568 
2569 	if (bp->b_flags & B_VMIO) {
2570 		int i, resid;
2571 		vm_ooffset_t foff;
2572 		vm_page_t m;
2573 		vm_object_t obj;
2574 		int iosize;
2575 		struct vnode *vp = bp->b_vp;
2576 
2577 		obj = vp->v_object;
2578 
2579 #if defined(VFS_BIO_DEBUG)
2580 		if (vp->v_usecount == 0) {
2581 			panic("biodone: zero vnode ref count");
2582 		}
2583 
2584 		if (vp->v_object == NULL) {
2585 			panic("biodone: missing VM object");
2586 		}
2587 
2588 		if ((vp->v_flag & VOBJBUF) == 0) {
2589 			panic("biodone: vnode is not setup for merged cache");
2590 		}
2591 #endif
2592 
2593 		foff = bp->b_offset;
2594 		KASSERT(bp->b_offset != NOOFFSET,
2595 		    ("biodone: no buffer offset"));
2596 
2597 #if !defined(MAX_PERF)
2598 		if (!obj) {
2599 			panic("biodone: no object");
2600 		}
2601 #endif
2602 #if defined(VFS_BIO_DEBUG)
2603 		if (obj->paging_in_progress < bp->b_npages) {
2604 			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
2605 			    obj->paging_in_progress, bp->b_npages);
2606 		}
2607 #endif
2608 
2609 		/*
2610 		 * Set B_CACHE if the op was a normal read and no error
2611 		 * occured.  B_CACHE is set for writes in the b*write()
2612 		 * routines.
2613 		 */
2614 		iosize = bp->b_bcount - bp->b_resid;
2615 		if ((bp->b_flags & (B_READ|B_FREEBUF|B_INVAL|B_NOCACHE|B_ERROR)) == B_READ) {
2616 			bp->b_flags |= B_CACHE;
2617 		}
2618 
2619 		for (i = 0; i < bp->b_npages; i++) {
2620 			int bogusflag = 0;
2621 			m = bp->b_pages[i];
2622 			if (m == bogus_page) {
2623 				bogusflag = 1;
2624 				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
2625 				if (!m) {
2626 #if defined(VFS_BIO_DEBUG)
2627 					printf("biodone: page disappeared\n");
2628 #endif
2629 					vm_object_pip_subtract(obj, 1);
2630 					bp->b_flags &= ~B_CACHE;
2631 					continue;
2632 				}
2633 				bp->b_pages[i] = m;
2634 				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2635 			}
2636 #if defined(VFS_BIO_DEBUG)
2637 			if (OFF_TO_IDX(foff) != m->pindex) {
2638 				printf(
2639 "biodone: foff(%lu)/m->pindex(%d) mismatch\n",
2640 				    (unsigned long)foff, m->pindex);
2641 			}
2642 #endif
2643 			resid = IDX_TO_OFF(m->pindex + 1) - foff;
2644 			if (resid > iosize)
2645 				resid = iosize;
2646 
2647 			/*
2648 			 * In the write case, the valid and clean bits are
2649 			 * already changed correctly ( see bdwrite() ), so we
2650 			 * only need to do this here in the read case.
2651 			 */
2652 			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
2653 				vfs_page_set_valid(bp, foff, i, m);
2654 			}
2655 			vm_page_flag_clear(m, PG_ZERO);
2656 
2657 			/*
2658 			 * when debugging new filesystems or buffer I/O methods, this
2659 			 * is the most common error that pops up.  if you see this, you
2660 			 * have not set the page busy flag correctly!!!
2661 			 */
2662 			if (m->busy == 0) {
2663 #if !defined(MAX_PERF)
2664 				printf("biodone: page busy < 0, "
2665 				    "pindex: %d, foff: 0x(%x,%x), "
2666 				    "resid: %d, index: %d\n",
2667 				    (int) m->pindex, (int)(foff >> 32),
2668 						(int) foff & 0xffffffff, resid, i);
2669 #endif
2670 				if (!vn_isdisk(vp))
2671 #if !defined(MAX_PERF)
2672 					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
2673 					    bp->b_vp->v_mount->mnt_stat.f_iosize,
2674 					    (int) bp->b_lblkno,
2675 					    bp->b_flags, bp->b_npages);
2676 				else
2677 					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
2678 					    (int) bp->b_lblkno,
2679 					    bp->b_flags, bp->b_npages);
2680 				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
2681 				    m->valid, m->dirty, m->wire_count);
2682 #endif
2683 				panic("biodone: page busy < 0\n");
2684 			}
2685 			vm_page_io_finish(m);
2686 			vm_object_pip_subtract(obj, 1);
2687 			foff += resid;
2688 			iosize -= resid;
2689 		}
2690 		if (obj)
2691 			vm_object_pip_wakeupn(obj, 0);
2692 	}
2693 	/*
2694 	 * For asynchronous completions, release the buffer now. The brelse
2695 	 * will do a wakeup there if necessary - so no need to do a wakeup
2696 	 * here in the async case. The sync case always needs to do a wakeup.
2697 	 */
2698 
2699 	if (bp->b_flags & B_ASYNC) {
2700 		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
2701 			brelse(bp);
2702 		else
2703 			bqrelse(bp);
2704 	} else {
2705 		wakeup(bp);
2706 	}
2707 	splx(s);
2708 }
2709 
2710 /*
2711  * This routine is called in lieu of iodone in the case of
2712  * incomplete I/O.  This keeps the busy status for pages
2713  * consistant.
2714  */
2715 void
2716 vfs_unbusy_pages(struct buf * bp)
2717 {
2718 	int i;
2719 
2720 	if (bp->b_flags & B_VMIO) {
2721 		struct vnode *vp = bp->b_vp;
2722 		vm_object_t obj = vp->v_object;
2723 
2724 		for (i = 0; i < bp->b_npages; i++) {
2725 			vm_page_t m = bp->b_pages[i];
2726 
2727 			if (m == bogus_page) {
2728 				m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
2729 #if !defined(MAX_PERF)
2730 				if (!m) {
2731 					panic("vfs_unbusy_pages: page missing\n");
2732 				}
2733 #endif
2734 				bp->b_pages[i] = m;
2735 				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2736 			}
2737 			vm_object_pip_subtract(obj, 1);
2738 			vm_page_flag_clear(m, PG_ZERO);
2739 			vm_page_io_finish(m);
2740 		}
2741 		vm_object_pip_wakeupn(obj, 0);
2742 	}
2743 }
2744 
2745 /*
2746  * vfs_page_set_valid:
2747  *
2748  *	Set the valid bits in a page based on the supplied offset.   The
2749  *	range is restricted to the buffer's size.
2750  *
2751  *	This routine is typically called after a read completes.
2752  */
2753 static void
2754 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
2755 {
2756 	vm_ooffset_t soff, eoff;
2757 
2758 	/*
2759 	 * Start and end offsets in buffer.  eoff - soff may not cross a
2760 	 * page boundry or cross the end of the buffer.  The end of the
2761 	 * buffer, in this case, is our file EOF, not the allocation size
2762 	 * of the buffer.
2763 	 */
2764 	soff = off;
2765 	eoff = (off + PAGE_SIZE) & ~PAGE_MASK;
2766 	if (eoff > bp->b_offset + bp->b_bcount)
2767 		eoff = bp->b_offset + bp->b_bcount;
2768 
2769 	/*
2770 	 * Set valid range.  This is typically the entire buffer and thus the
2771 	 * entire page.
2772 	 */
2773 	if (eoff > soff) {
2774 		vm_page_set_validclean(
2775 		    m,
2776 		   (vm_offset_t) (soff & PAGE_MASK),
2777 		   (vm_offset_t) (eoff - soff)
2778 		);
2779 	}
2780 }
2781 
2782 /*
2783  * This routine is called before a device strategy routine.
2784  * It is used to tell the VM system that paging I/O is in
2785  * progress, and treat the pages associated with the buffer
2786  * almost as being PG_BUSY.  Also the object paging_in_progress
2787  * flag is handled to make sure that the object doesn't become
2788  * inconsistant.
2789  *
2790  * Since I/O has not been initiated yet, certain buffer flags
2791  * such as B_ERROR or B_INVAL may be in an inconsistant state
2792  * and should be ignored.
2793  */
2794 void
2795 vfs_busy_pages(struct buf * bp, int clear_modify)
2796 {
2797 	int i, bogus;
2798 
2799 	if (bp->b_flags & B_VMIO) {
2800 		struct vnode *vp = bp->b_vp;
2801 		vm_object_t obj = vp->v_object;
2802 		vm_ooffset_t foff;
2803 
2804 		foff = bp->b_offset;
2805 		KASSERT(bp->b_offset != NOOFFSET,
2806 		    ("vfs_busy_pages: no buffer offset"));
2807 		vfs_setdirty(bp);
2808 
2809 retry:
2810 		for (i = 0; i < bp->b_npages; i++) {
2811 			vm_page_t m = bp->b_pages[i];
2812 			if (vm_page_sleep_busy(m, FALSE, "vbpage"))
2813 				goto retry;
2814 		}
2815 
2816 		bogus = 0;
2817 		for (i = 0; i < bp->b_npages; i++) {
2818 			vm_page_t m = bp->b_pages[i];
2819 
2820 			vm_page_flag_clear(m, PG_ZERO);
2821 			if ((bp->b_flags & B_CLUSTER) == 0) {
2822 				vm_object_pip_add(obj, 1);
2823 				vm_page_io_start(m);
2824 			}
2825 
2826 			/*
2827 			 * When readying a buffer for a read ( i.e
2828 			 * clear_modify == 0 ), it is important to do
2829 			 * bogus_page replacement for valid pages in
2830 			 * partially instantiated buffers.  Partially
2831 			 * instantiated buffers can, in turn, occur when
2832 			 * reconstituting a buffer from its VM backing store
2833 			 * base.  We only have to do this if B_CACHE is
2834 			 * clear ( which causes the I/O to occur in the
2835 			 * first place ).  The replacement prevents the read
2836 			 * I/O from overwriting potentially dirty VM-backed
2837 			 * pages.  XXX bogus page replacement is, uh, bogus.
2838 			 * It may not work properly with small-block devices.
2839 			 * We need to find a better way.
2840 			 */
2841 
2842 			vm_page_protect(m, VM_PROT_NONE);
2843 			if (clear_modify)
2844 				vfs_page_set_valid(bp, foff, i, m);
2845 			else if (m->valid == VM_PAGE_BITS_ALL &&
2846 				(bp->b_flags & B_CACHE) == 0) {
2847 				bp->b_pages[i] = bogus_page;
2848 				bogus++;
2849 			}
2850 			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2851 		}
2852 		if (bogus)
2853 			pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2854 	}
2855 }
2856 
2857 /*
2858  * Tell the VM system that the pages associated with this buffer
2859  * are clean.  This is used for delayed writes where the data is
2860  * going to go to disk eventually without additional VM intevention.
2861  *
2862  * Note that while we only really need to clean through to b_bcount, we
2863  * just go ahead and clean through to b_bufsize.
2864  */
2865 static void
2866 vfs_clean_pages(struct buf * bp)
2867 {
2868 	int i;
2869 
2870 	if (bp->b_flags & B_VMIO) {
2871 		vm_ooffset_t foff;
2872 
2873 		foff = bp->b_offset;
2874 		KASSERT(bp->b_offset != NOOFFSET,
2875 		    ("vfs_clean_pages: no buffer offset"));
2876 		for (i = 0; i < bp->b_npages; i++) {
2877 			vm_page_t m = bp->b_pages[i];
2878 			vm_ooffset_t noff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2879 			vm_ooffset_t eoff = noff;
2880 
2881 			if (eoff > bp->b_offset + bp->b_bufsize)
2882 				eoff = bp->b_offset + bp->b_bufsize;
2883 			vfs_page_set_valid(bp, foff, i, m);
2884 			/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
2885 			foff = noff;
2886 		}
2887 	}
2888 }
2889 
2890 /*
2891  *	vfs_bio_set_validclean:
2892  *
2893  *	Set the range within the buffer to valid and clean.  The range is
2894  *	relative to the beginning of the buffer, b_offset.  Note that b_offset
2895  *	itself may be offset from the beginning of the first page.
2896  */
2897 
2898 void
2899 vfs_bio_set_validclean(struct buf *bp, int base, int size)
2900 {
2901 	if (bp->b_flags & B_VMIO) {
2902 		int i;
2903 		int n;
2904 
2905 		/*
2906 		 * Fixup base to be relative to beginning of first page.
2907 		 * Set initial n to be the maximum number of bytes in the
2908 		 * first page that can be validated.
2909 		 */
2910 
2911 		base += (bp->b_offset & PAGE_MASK);
2912 		n = PAGE_SIZE - (base & PAGE_MASK);
2913 
2914 		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
2915 			vm_page_t m = bp->b_pages[i];
2916 
2917 			if (n > size)
2918 				n = size;
2919 
2920 			vm_page_set_validclean(m, base & PAGE_MASK, n);
2921 			base += n;
2922 			size -= n;
2923 			n = PAGE_SIZE;
2924 		}
2925 	}
2926 }
2927 
2928 /*
2929  *	vfs_bio_clrbuf:
2930  *
2931  *	clear a buffer.  This routine essentially fakes an I/O, so we need
2932  *	to clear B_ERROR and B_INVAL.
2933  *
2934  *	Note that while we only theoretically need to clear through b_bcount,
2935  *	we go ahead and clear through b_bufsize.
2936  */
2937 
2938 void
2939 vfs_bio_clrbuf(struct buf *bp) {
2940 	int i, mask = 0;
2941 	caddr_t sa, ea;
2942 	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
2943 		bp->b_flags &= ~(B_INVAL|B_ERROR);
2944 		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
2945 		    (bp->b_offset & PAGE_MASK) == 0) {
2946 			mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
2947 			if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
2948 			    ((bp->b_pages[0]->valid & mask) != mask)) {
2949 				bzero(bp->b_data, bp->b_bufsize);
2950 			}
2951 			bp->b_pages[0]->valid |= mask;
2952 			bp->b_resid = 0;
2953 			return;
2954 		}
2955 		ea = sa = bp->b_data;
2956 		for(i=0;i<bp->b_npages;i++,sa=ea) {
2957 			int j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
2958 			ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
2959 			ea = (caddr_t)(vm_offset_t)ulmin(
2960 			    (u_long)(vm_offset_t)ea,
2961 			    (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
2962 			mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
2963 			if ((bp->b_pages[i]->valid & mask) == mask)
2964 				continue;
2965 			if ((bp->b_pages[i]->valid & mask) == 0) {
2966 				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
2967 					bzero(sa, ea - sa);
2968 				}
2969 			} else {
2970 				for (; sa < ea; sa += DEV_BSIZE, j++) {
2971 					if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
2972 						(bp->b_pages[i]->valid & (1<<j)) == 0)
2973 						bzero(sa, DEV_BSIZE);
2974 				}
2975 			}
2976 			bp->b_pages[i]->valid |= mask;
2977 			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
2978 		}
2979 		bp->b_resid = 0;
2980 	} else {
2981 		clrbuf(bp);
2982 	}
2983 }
2984 
2985 /*
2986  * vm_hold_load_pages and vm_hold_unload pages get pages into
2987  * a buffers address space.  The pages are anonymous and are
2988  * not associated with a file object.
2989  */
2990 void
2991 vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
2992 {
2993 	vm_offset_t pg;
2994 	vm_page_t p;
2995 	int index;
2996 
2997 	to = round_page(to);
2998 	from = round_page(from);
2999 	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3000 
3001 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
3002 
3003 tryagain:
3004 
3005 		p = vm_page_alloc(kernel_object,
3006 			((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
3007 		    VM_ALLOC_NORMAL);
3008 		if (!p) {
3009 			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
3010 			VM_WAIT;
3011 			goto tryagain;
3012 		}
3013 		vm_page_wire(p);
3014 		p->valid = VM_PAGE_BITS_ALL;
3015 		vm_page_flag_clear(p, PG_ZERO);
3016 		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
3017 		bp->b_pages[index] = p;
3018 		vm_page_wakeup(p);
3019 	}
3020 	bp->b_npages = index;
3021 }
3022 
3023 void
3024 vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
3025 {
3026 	vm_offset_t pg;
3027 	vm_page_t p;
3028 	int index, newnpages;
3029 
3030 	from = round_page(from);
3031 	to = round_page(to);
3032 	newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
3033 
3034 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
3035 		p = bp->b_pages[index];
3036 		if (p && (index < bp->b_npages)) {
3037 #if !defined(MAX_PERF)
3038 			if (p->busy) {
3039 				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
3040 					bp->b_blkno, bp->b_lblkno);
3041 			}
3042 #endif
3043 			bp->b_pages[index] = NULL;
3044 			pmap_kremove(pg);
3045 			vm_page_busy(p);
3046 			vm_page_unwire(p, 0);
3047 			vm_page_free(p);
3048 		}
3049 	}
3050 	bp->b_npages = newnpages;
3051 }
3052 
3053 
3054 #include "opt_ddb.h"
3055 #ifdef DDB
3056 #include <ddb/ddb.h>
3057 
3058 DB_SHOW_COMMAND(buffer, db_show_buffer)
3059 {
3060 	/* get args */
3061 	struct buf *bp = (struct buf *)addr;
3062 
3063 	if (!have_addr) {
3064 		db_printf("usage: show buffer <addr>\n");
3065 		return;
3066 	}
3067 
3068 	db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
3069 	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
3070 		  "b_resid = %ld\nb_dev = (%d,%d), b_data = %p, "
3071 		  "b_blkno = %d, b_pblkno = %d\n",
3072 		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
3073 		  major(bp->b_dev), minor(bp->b_dev),
3074 		  bp->b_data, bp->b_blkno, bp->b_pblkno);
3075 	if (bp->b_npages) {
3076 		int i;
3077 		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
3078 		for (i = 0; i < bp->b_npages; i++) {
3079 			vm_page_t m;
3080 			m = bp->b_pages[i];
3081 			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
3082 			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
3083 			if ((i + 1) < bp->b_npages)
3084 				db_printf(",");
3085 		}
3086 		db_printf("\n");
3087 	}
3088 }
3089 #endif /* DDB */
3090