xref: /freebsd/sys/kern/vfs_bio.c (revision 6e8394b8baa7d5d9153ab90de6824bcd19b3b4e1)
1 /*
2  * Copyright (c) 1994,1997 John S. Dyson
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice immediately at the beginning of the file, without modification,
10  *    this list of conditions, and the following disclaimer.
11  * 2. Absolutely no warranty of function or purpose is made by the author
12  *		John S. Dyson.
13  *
14  * $Id: vfs_bio.c,v 1.214 1999/06/16 23:27:31 mckusick Exp $
15  */
16 
17 /*
18  * this file contains a new buffer I/O scheme implementing a coherent
19  * VM object and buffer cache scheme.  Pains have been taken to make
20  * sure that the performance degradation associated with schemes such
21  * as this is not realized.
22  *
23  * Author:  John S. Dyson
24  * Significant help during the development and debugging phases
25  * was provided by David Greenman, also of the FreeBSD core team.
26  *
27  * see man buf(9) for more info.
28  */
29 
30 #define VMIO
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/sysproto.h>
34 #include <sys/kernel.h>
35 #include <sys/sysctl.h>
36 #include <sys/proc.h>
37 #include <sys/vnode.h>
38 #include <sys/vmmeter.h>
39 #include <sys/lock.h>
40 #include <miscfs/specfs/specdev.h>
41 #include <vm/vm.h>
42 #include <vm/vm_param.h>
43 #include <vm/vm_prot.h>
44 #include <vm/vm_kern.h>
45 #include <vm/vm_pageout.h>
46 #include <vm/vm_page.h>
47 #include <vm/vm_object.h>
48 #include <vm/vm_extern.h>
49 #include <vm/vm_map.h>
50 #include <sys/buf.h>
51 #include <sys/mount.h>
52 #include <sys/malloc.h>
53 #include <sys/resourcevar.h>
54 
55 static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");
56 
57 struct	bio_ops bioops;		/* I/O operation notification */
58 
59 #if 0 	/* replaced by sched_sync */
60 static void vfs_update __P((void));
61 static struct	proc *updateproc;
62 static struct kproc_desc up_kp = {
63 	"update",
64 	vfs_update,
65 	&updateproc
66 };
67 SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
68 #endif
69 
70 struct buf *buf;		/* buffer header pool */
71 struct swqueue bswlist;
72 
73 static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
74 		vm_offset_t to);
75 static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
76 		vm_offset_t to);
77 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
78 			       int pageno, vm_page_t m);
79 static void vfs_clean_pages(struct buf * bp);
80 static void vfs_setdirty(struct buf *bp);
81 static void vfs_vmio_release(struct buf *bp);
82 static void flushdirtybuffers(int slpflag, int slptimeo);
83 static int flushbufqueues(void);
84 
85 /*
86  * Internal update daemon, process 3
87  *	The variable vfs_update_wakeup allows for internal syncs.
88  */
89 int vfs_update_wakeup;
90 
91 /*
92  * bogus page -- for I/O to/from partially complete buffers
93  * this is a temporary solution to the problem, but it is not
94  * really that bad.  it would be better to split the buffer
95  * for input in the case of buffers partially already in memory,
96  * but the code is intricate enough already.
97  */
98 vm_page_t bogus_page;
99 int runningbufspace;
100 static vm_offset_t bogus_offset;
101 
102 static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
103 	bufmallocspace, maxbufmallocspace, hibufspace;
104 static int needsbuffer;
105 static int numdirtybuffers, lodirtybuffers, hidirtybuffers;
106 static int numfreebuffers, lofreebuffers, hifreebuffers;
107 static int kvafreespace;
108 
109 SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
110 	&numdirtybuffers, 0, "");
111 SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
112 	&lodirtybuffers, 0, "");
113 SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
114 	&hidirtybuffers, 0, "");
115 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
116 	&numfreebuffers, 0, "");
117 SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
118 	&lofreebuffers, 0, "");
119 SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
120 	&hifreebuffers, 0, "");
121 SYSCTL_INT(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD,
122 	&runningbufspace, 0, "");
123 SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
124 	&maxbufspace, 0, "");
125 SYSCTL_INT(_vfs, OID_AUTO, hibufspace, CTLFLAG_RD,
126 	&hibufspace, 0, "");
127 SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
128 	&bufspace, 0, "");
129 SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
130 	&maxvmiobufspace, 0, "");
131 SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
132 	&vmiospace, 0, "");
133 SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
134 	&maxbufmallocspace, 0, "");
135 SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
136 	&bufmallocspace, 0, "");
137 SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
138 	&kvafreespace, 0, "");
139 
140 static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
141 struct bqueues bufqueues[BUFFER_QUEUES] = { { 0 } };
142 
143 extern int vm_swap_size;
144 
145 #define BUF_MAXUSE		24
146 
147 #define VFS_BIO_NEED_ANY	0x01	/* any freeable buffer */
148 #define VFS_BIO_NEED_RESERVED02	0x02	/* unused */
149 #define VFS_BIO_NEED_FREE	0x04	/* wait for free bufs, hi hysteresis */
150 #define VFS_BIO_NEED_BUFSPACE	0x08	/* wait for buf space, lo hysteresis */
151 #define VFS_BIO_NEED_KVASPACE	0x10	/* wait for buffer_map space, emerg  */
152 
153 /*
154  *	kvaspacewakeup:
155  *
156  *	Called when kva space is potential available for recovery or when
157  *	kva space is recovered in the buffer_map.  This function wakes up
158  *	anyone waiting for buffer_map kva space.  Even though the buffer_map
159  *	is larger then maxbufspace, this situation will typically occur
160  *	when the buffer_map gets fragmented.
161  */
162 
163 static __inline void
164 kvaspacewakeup(void)
165 {
166 	/*
167 	 * If someone is waiting for KVA space, wake them up.  Even
168 	 * though we haven't freed the kva space yet, the waiting
169 	 * process will be able to now.
170 	 */
171 	if (needsbuffer & VFS_BIO_NEED_KVASPACE) {
172 		needsbuffer &= ~VFS_BIO_NEED_KVASPACE;
173 		wakeup(&needsbuffer);
174 	}
175 }
176 
177 /*
178  *	bufspacewakeup:
179  *
180  *	Called when buffer space is potentially available for recovery or when
181  *	buffer space is recovered.  getnewbuf() will block on this flag when
182  *	it is unable to free sufficient buffer space.  Buffer space becomes
183  *	recoverable when bp's get placed back in the queues.
184  */
185 
186 static __inline void
187 bufspacewakeup(void)
188 {
189 	/*
190 	 * If someone is waiting for BUF space, wake them up.  Even
191 	 * though we haven't freed the buffer space yet, the waiting
192 	 * process will be able to now.
193 	 */
194 	if (needsbuffer & VFS_BIO_NEED_BUFSPACE) {
195 		needsbuffer &= ~VFS_BIO_NEED_BUFSPACE;
196 		wakeup(&needsbuffer);
197 	}
198 }
199 
200 /*
201  *	bufcountwakeup:
202  *
203  *	Called when a buffer has been added to one of the free queues to
204  *	account for the buffer and to wakeup anyone waiting for free buffers.
205  *	This typically occurs when large amounts of metadata are being handled
206  *	by the buffer cache ( else buffer space runs out first, usually ).
207  */
208 
209 static __inline void
210 bufcountwakeup(void)
211 {
212 	++numfreebuffers;
213 	if (needsbuffer) {
214 		needsbuffer &= ~VFS_BIO_NEED_ANY;
215 		if (numfreebuffers >= hifreebuffers)
216 			needsbuffer &= ~VFS_BIO_NEED_FREE;
217 		wakeup(&needsbuffer);
218 	}
219 }
220 
221 /*
222  *	vfs_buf_test_cache:
223  *
224  *	Called when a buffer is extended.  This function clears the B_CACHE
225  *	bit if the newly extended portion of the buffer does not contain
226  *	valid data.
227  */
228 static __inline__
229 void
230 vfs_buf_test_cache(struct buf *bp,
231 		  vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
232 		  vm_page_t m)
233 {
234 	if (bp->b_flags & B_CACHE) {
235 		int base = (foff + off) & PAGE_MASK;
236 		if (vm_page_is_valid(m, base, size) == 0)
237 			bp->b_flags &= ~B_CACHE;
238 	}
239 }
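
/*
 * Illustrative example (not part of the original source), assuming 4K
 * pages and a page-aligned b_offset: a caller extending a buffer from
 * 4K to 8K would test the newly covered page with
 *
 *	vfs_buf_test_cache(bp, bp->b_offset, 4096, 4096, bp->b_pages[1]);
 *
 * If that page does not have all of bytes 0-4095 marked valid, B_CACHE
 * is cleared and the buffer is no longer treated as fully cached.
 */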
240 
241 
242 /*
243  * Initialize buffer headers and related structures.
244  */
245 void
246 bufinit()
247 {
248 	struct buf *bp;
249 	int i;
250 
251 	TAILQ_INIT(&bswlist);
252 	LIST_INIT(&invalhash);
253 
254 	/* first, make a null hash table */
255 	for (i = 0; i < BUFHSZ; i++)
256 		LIST_INIT(&bufhashtbl[i]);
257 
258 	/* next, make a null set of free lists */
259 	for (i = 0; i < BUFFER_QUEUES; i++)
260 		TAILQ_INIT(&bufqueues[i]);
261 
262 	/* finally, initialize each buffer header and stick on empty q */
263 	for (i = 0; i < nbuf; i++) {
264 		bp = &buf[i];
265 		bzero(bp, sizeof *bp);
266 		bp->b_flags = B_INVAL;	/* we're just an empty header */
267 		bp->b_dev = NODEV;
268 		bp->b_rcred = NOCRED;
269 		bp->b_wcred = NOCRED;
270 		bp->b_qindex = QUEUE_EMPTY;
271 		bp->b_xflags = 0;
272 		LIST_INIT(&bp->b_dep);
273 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
274 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
275 	}
276 
277 	/*
278 	 * maxbufspace is currently calculated assuming all filesystem
279 	 * blocks are 8K.  If you happen to use a 16K filesystem, the size
280 	 * of the buffer cache is still the same as it would be for 8K
281 	 * filesystems.  This keeps the size of the buffer cache "in check"
282 	 * for big block filesystems.
283 	 *
284 	 * maxbufspace is calculated as around 50% of the KVA available in
285 	 * the buffer_map ( DFLTBSIZE vs BKVASIZE ), I presume, to reduce the
286 	 * effect of fragmentation.
287 	 */
288 	maxbufspace = (nbuf + 8) * DFLTBSIZE;
289 	if ((hibufspace = maxbufspace - MAXBSIZE * 5) <= MAXBSIZE)
290 		hibufspace = 3 * maxbufspace / 4;
291 /*
292  * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
293  */
294 	maxvmiobufspace = 2 * hibufspace / 3;
295 /*
296  * Limit the amount of malloc memory since it is wired permanently into
297  * the kernel space.  Even though this is accounted for in the buffer
298  * allocation, we don't want the malloced region to grow uncontrolled.
299  * The malloc scheme improves memory utilization significantly on average
300  * (small) directories.
301  */
302 	maxbufmallocspace = hibufspace / 20;
303 
304 /*
305  * Reduce the chance of a deadlock occurring by limiting the number
306  * of delayed-write dirty buffers we allow to stack up.
307  */
308 	lodirtybuffers = nbuf / 16 + 10;
309 	hidirtybuffers = nbuf / 8 + 20;
310 	numdirtybuffers = 0;
311 
312 /*
313  * Try to keep the number of free buffers in the specified range,
314  * and give the syncer access to an emergency reserve.
315  */
316 	lofreebuffers = nbuf / 18 + 5;
317 	hifreebuffers = 2 * lofreebuffers;
318 	numfreebuffers = nbuf;
319 
320 	kvafreespace = 0;
321 
322 	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
323 	bogus_page = vm_page_alloc(kernel_object,
324 			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
325 			VM_ALLOC_NORMAL);
326 
327 }
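
/*
 * Illustrative sizing (not part of the original source), assuming
 * nbuf = 1024, DFLTBSIZE = 4K, MAXBSIZE = 64K and 4K pages:
 *
 *	maxbufspace       = (1024 + 8) * 4096        = 4128K
 *	hibufspace        = maxbufspace - 5 * 65536  = 3808K
 *	maxvmiobufspace   = 2 * hibufspace / 3       = ~2539K
 *	maxbufmallocspace = hibufspace / 20          = ~190K
 *	lodirtybuffers / hidirtybuffers              = 74 / 148
 *	lofreebuffers / hifreebuffers                = 61 / 122
 *
 * The real numbers depend on how nbuf and the block-size constants are
 * tuned for the platform.
 */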
328 
329 /*
330  * Free the kva allocation for a buffer
331  * Must be called only at splbio or higher,
332  *  as this is the only locking for buffer_map.
333  */
334 static void
335 bfreekva(struct buf * bp)
336 {
337 	if (bp->b_kvasize) {
338 		vm_map_delete(buffer_map,
339 		    (vm_offset_t) bp->b_kvabase,
340 		    (vm_offset_t) bp->b_kvabase + bp->b_kvasize
341 		);
342 		bp->b_kvasize = 0;
343 		kvaspacewakeup();
344 	}
345 }
346 
347 /*
348  *	bremfree:
349  *
350  *	Remove the buffer from the appropriate free list.
351  */
352 void
353 bremfree(struct buf * bp)
354 {
355 	int s = splbio();
356 	int old_qindex = bp->b_qindex;
357 
358 	if (bp->b_qindex != QUEUE_NONE) {
359 		if (bp->b_qindex == QUEUE_EMPTY) {
360 			kvafreespace -= bp->b_kvasize;
361 		}
362 		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
363 		bp->b_qindex = QUEUE_NONE;
364 		runningbufspace += bp->b_bufsize;
365 	} else {
366 #if !defined(MAX_PERF)
367 		panic("bremfree: removing a buffer when not on a queue");
368 #endif
369 	}
370 
371 	/*
372 	 * Fixup numfreebuffers count.  If the buffer is invalid or not
373 	 * delayed-write, and it was on the EMPTY, LRU, or AGE queues,
374 	 * the buffer was free and we must decrement numfreebuffers.
375 	 */
376 	if ((bp->b_flags & B_INVAL) || (bp->b_flags & B_DELWRI) == 0) {
377 		switch(old_qindex) {
378 		case QUEUE_EMPTY:
379 		case QUEUE_LRU:
380 		case QUEUE_AGE:
381 			--numfreebuffers;
382 			break;
383 		default:
384 			break;
385 		}
386 	}
387 	splx(s);
388 }
389 
390 
391 /*
392  * Get a buffer with the specified data.  Look in the cache first.  We
393  * must clear B_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
394  * is set, the buffer is valid and we do not have to do anything ( see
395  * getblk() ).
396  */
397 int
398 bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
399     struct buf ** bpp)
400 {
401 	struct buf *bp;
402 
403 	bp = getblk(vp, blkno, size, 0, 0);
404 	*bpp = bp;
405 
406 	/* if not found in cache, do some I/O */
407 	if ((bp->b_flags & B_CACHE) == 0) {
408 		if (curproc != NULL)
409 			curproc->p_stats->p_ru.ru_inblock++;
410 		KASSERT(!(bp->b_flags & B_ASYNC), ("bread: illegal async bp %p", bp));
411 		bp->b_flags |= B_READ;
412 		bp->b_flags &= ~(B_ERROR | B_INVAL);
413 		if (bp->b_rcred == NOCRED) {
414 			if (cred != NOCRED)
415 				crhold(cred);
416 			bp->b_rcred = cred;
417 		}
418 		vfs_busy_pages(bp, 0);
419 		VOP_STRATEGY(vp, bp);
420 		return (biowait(bp));
421 	}
422 	return (0);
423 }
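
/*
 * Typical usage sketch (not part of the original source); lbn and
 * bsize stand in for a filesystem's logical block number and block
 * size.  On error the caller must release the buffer itself:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... use bp->b_data ...
 *	bqrelse(bp);
 */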
424 
425 /*
426  * Operates like bread, but also starts asynchronous I/O on
427  * read-ahead blocks.  We must clear B_ERROR and B_INVAL prior
428  * to initiating I/O.  If B_CACHE is set, the buffer is valid
429  * and we do not have to do anything.
430  */
431 int
432 breadn(struct vnode * vp, daddr_t blkno, int size,
433     daddr_t * rablkno, int *rabsize,
434     int cnt, struct ucred * cred, struct buf ** bpp)
435 {
436 	struct buf *bp, *rabp;
437 	int i;
438 	int rv = 0, readwait = 0;
439 
440 	*bpp = bp = getblk(vp, blkno, size, 0, 0);
441 
442 	/* if not found in cache, do some I/O */
443 	if ((bp->b_flags & B_CACHE) == 0) {
444 		if (curproc != NULL)
445 			curproc->p_stats->p_ru.ru_inblock++;
446 		bp->b_flags |= B_READ;
447 		bp->b_flags &= ~(B_ERROR | B_INVAL);
448 		if (bp->b_rcred == NOCRED) {
449 			if (cred != NOCRED)
450 				crhold(cred);
451 			bp->b_rcred = cred;
452 		}
453 		vfs_busy_pages(bp, 0);
454 		VOP_STRATEGY(vp, bp);
455 		++readwait;
456 	}
457 
458 	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
459 		if (inmem(vp, *rablkno))
460 			continue;
461 		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
462 
463 		if ((rabp->b_flags & B_CACHE) == 0) {
464 			if (curproc != NULL)
465 				curproc->p_stats->p_ru.ru_inblock++;
466 			rabp->b_flags |= B_READ | B_ASYNC;
467 			rabp->b_flags &= ~(B_ERROR | B_INVAL);
468 			if (rabp->b_rcred == NOCRED) {
469 				if (cred != NOCRED)
470 					crhold(cred);
471 				rabp->b_rcred = cred;
472 			}
473 			vfs_busy_pages(rabp, 0);
474 			VOP_STRATEGY(vp, rabp);
475 		} else {
476 			brelse(rabp);
477 		}
478 	}
479 
480 	if (readwait) {
481 		rv = biowait(bp);
482 	}
483 	return (rv);
484 }
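
/*
 * Usage sketch (not part of the original source): read block lbn and
 * start an asynchronous read-ahead of the following block:
 *
 *	daddr_t rablkno = lbn + 1;
 *	int rabsize = bsize;
 *
 *	error = breadn(vp, lbn, bsize, &rablkno, &rabsize, 1, NOCRED, &bp);
 *
 * Only the biowait() result for the primary buffer is returned; the
 * read-ahead buffer is released by biodone() when its async I/O
 * completes.
 */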
485 
486 /*
487  * Write, release buffer on completion.  (Done by iodone
488  * if async).  Do not bother writing anything if the buffer
489  * is invalid.
490  *
491  * Note that we set B_CACHE here, indicating that buffer is
492  * fully valid and thus cacheable.  This is true even of NFS
493  * now so we set it generally.  This could be set either here
494  * or in biodone() since the I/O is synchronous.  We put it
495  * here.
496  */
497 int
498 bwrite(struct buf * bp)
499 {
500 	int oldflags, s;
501 	struct vnode *vp;
502 	struct mount *mp;
503 
504 	if (bp->b_flags & B_INVAL) {
505 		brelse(bp);
506 		return (0);
507 	}
508 
509 	oldflags = bp->b_flags;
510 
511 #if !defined(MAX_PERF)
512 	if ((bp->b_flags & B_BUSY) == 0)
513 		panic("bwrite: buffer is not busy???");
514 #endif
515 	s = splbio();
516 	bundirty(bp);
517 
518 	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
519 	bp->b_flags |= B_WRITEINPROG | B_CACHE;
520 
521 	bp->b_vp->v_numoutput++;
522 	vfs_busy_pages(bp, 1);
523 	if (curproc != NULL)
524 		curproc->p_stats->p_ru.ru_oublock++;
525 	splx(s);
526 	VOP_STRATEGY(bp->b_vp, bp);
527 
528 	/*
529 	 * Collect statistics on synchronous and asynchronous writes.
530 	 * Writes to block devices are charged to their associated
531 	 * filesystem (if any).
532 	 */
533 	if ((vp = bp->b_vp) != NULL) {
534 		if (vp->v_type == VBLK)
535 			mp = vp->v_specmountpoint;
536 		else
537 			mp = vp->v_mount;
538 		if (mp != NULL) {
539 			if ((oldflags & B_ASYNC) == 0)
540 				mp->mnt_stat.f_syncwrites++;
541 			else
542 				mp->mnt_stat.f_asyncwrites++;
543 		}
544 	}
545 
546 	if ((oldflags & B_ASYNC) == 0) {
547 		int rtval = biowait(bp);
548 		brelse(bp);
549 		return (rtval);
550 	}
551 
552 	return (0);
553 }
554 
555 /*
556  * Delayed write. (Buffer is marked dirty).  Do not bother writing
557  * anything if the buffer is marked invalid.
558  *
559  * Note that since the buffer must be completely valid, we can safely
560  * set B_CACHE.  In fact, we have to set B_CACHE here rather then in
561  * biodone() in order to prevent getblk from writing the buffer
562  * out synchronously.
563  */
564 void
565 bdwrite(struct buf * bp)
566 {
567 	struct vnode *vp;
568 
569 #if !defined(MAX_PERF)
570 	if ((bp->b_flags & B_BUSY) == 0) {
571 		panic("bdwrite: buffer is not busy");
572 	}
573 #endif
574 
575 	if (bp->b_flags & B_INVAL) {
576 		brelse(bp);
577 		return;
578 	}
579 	bdirty(bp);
580 
581 	/*
582 	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
583 	 * true even of NFS now.
584 	 */
585 	bp->b_flags |= B_CACHE;
586 
587 	/*
588 	 * Doing the bmap now keeps the system from needing to do it later,
589 	 * perhaps when the system is attempting a sync.  Since the indirect
590 	 * block -- or whatever other data structure the filesystem needs --
591 	 * is likely still in memory now, this is a good time to do it.  Note
592 	 * also that if the pageout daemon is requesting a sync, there might
593 	 * not be enough memory to do the bmap then, so this is important to
594 	 * do now.
595 	 */
596 	if (bp->b_lblkno == bp->b_blkno) {
597 		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
598 	}
599 
600 	/*
601 	 * Set the *dirty* buffer range based upon the VM system dirty pages.
602 	 */
603 	vfs_setdirty(bp);
604 
605 	/*
606 	 * We need to do this here to satisfy the vnode_pager and the
607 	 * pageout daemon, so that they think the pages have been
608 	 * "cleaned".  Note that since the pages are in a delayed write
609 	 * buffer, the VFS layer "will" see that the pages get written
610 	 * out on the next sync, or perhaps the cluster will be completed.
611 	 */
612 	vfs_clean_pages(bp);
613 	bqrelse(bp);
614 
615 	/*
616 	 * XXX The soft dependency code is not prepared to
617 	 * have I/O done when a bdwrite is requested. For
618 	 * now we just let the write be delayed if it is
619 	 * requested by the soft dependency code.
620 	 */
621 	if ((vp = bp->b_vp) &&
622 	    ((vp->v_type == VBLK && vp->v_specmountpoint &&
623 		  (vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP)) ||
624 		 (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))))
625 		return;
626 
627 	if (numdirtybuffers >= hidirtybuffers)
628 		flushdirtybuffers(0, 0);
629 }
630 
631 /*
632  *	bdirty:
633  *
634  *	Turn buffer into delayed write request.  We must clear B_READ and
635  *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
636  *	itself to properly update it in the dirty/clean lists.  We mark it
637  *	B_DONE to ensure that any asynchronization of the buffer properly
638  *	clears B_DONE ( else a panic will occur later ).
639  *
640  *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
641  *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
642  *	should only be called if the buffer is known-good.
643  *
644  *	Since the buffer is not on a queue, we do not update the numfreebuffers
645  *	count.
646  *
647  *	Must be called at splbio().
648  *	The buffer must be on QUEUE_NONE.
649  */
650 void
651 bdirty(bp)
652 	struct buf *bp;
653 {
654 	KASSERT(bp->b_qindex == QUEUE_NONE, ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
655 	bp->b_flags &= ~(B_READ|B_RELBUF);
656 
657 	if ((bp->b_flags & B_DELWRI) == 0) {
658 		bp->b_flags |= B_DONE | B_DELWRI;
659 		reassignbuf(bp, bp->b_vp);
660 		++numdirtybuffers;
661 	}
662 }
663 
664 /*
665  *	bundirty:
666  *
667  *	Clear B_DELWRI for buffer.
668  *
669  *	Since the buffer is not on a queue, we do not update the numfreebuffers
670  *	count.
671  *
672  *	Must be called at splbio().
673  *	The buffer must be on QUEUE_NONE.
674  */
675 
676 void
677 bundirty(bp)
678 	struct buf *bp;
679 {
680 	KASSERT(bp->b_qindex == QUEUE_NONE, ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
681 
682 	if (bp->b_flags & B_DELWRI) {
683 		bp->b_flags &= ~B_DELWRI;
684 		reassignbuf(bp, bp->b_vp);
685 		--numdirtybuffers;
686 	}
687 }
688 
689 /*
690  *	bawrite:
691  *
692  *	Asynchronous write.  Start output on a buffer, but do not wait for
693  *	it to complete.  The buffer is released when the output completes.
694  *
695  *	bwrite() ( or the VOP routine anyway ) is responsible for handling
696  *	B_INVAL buffers.  Not us.
697  */
698 void
699 bawrite(struct buf * bp)
700 {
701 	bp->b_flags |= B_ASYNC;
702 	(void) VOP_BWRITE(bp->b_vp, bp);
703 }
704 
705 /*
706  *	bowrite:
707  *
708  *	Ordered write.  Start output on a buffer, and flag it so that the
709  *	device will write it in the order it was queued.  The buffer is
710  *	released when the output completes.  bwrite() ( or the VOP routine
711  *	anyway ) is responsible for handling B_INVAL buffers.
712  */
713 int
714 bowrite(struct buf * bp)
715 {
716 	bp->b_flags |= B_ORDERED | B_ASYNC;
717 	return (VOP_BWRITE(bp->b_vp, bp));
718 }
719 
720 /*
721  *	brelse:
722  *
723  *	Release a busy buffer and, if requested, free its resources.  The
724  *	buffer will be stashed in the appropriate bufqueue[] allowing it
725  *	to be accessed later as a cache entity or reused for other purposes.
726  */
727 void
728 brelse(struct buf * bp)
729 {
730 	int s;
731 
732 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
733 
734 #if 0
735 	if (bp->b_flags & B_CLUSTER) {
736 		relpbuf(bp, NULL);
737 		return;
738 	}
739 #endif
740 
741 	s = splbio();
742 
743 	if (bp->b_flags & B_LOCKED)
744 		bp->b_flags &= ~B_ERROR;
745 
746 	if ((bp->b_flags & (B_READ | B_ERROR)) == B_ERROR) {
747 		/*
748 		 * Failed write, redirty.  Must clear B_ERROR to prevent
749 		 * pages from being scrapped.  Note: B_INVAL is ignored
750 		 * here but will presumably be dealt with later.
751 		 */
752 		bp->b_flags &= ~B_ERROR;
753 		bdirty(bp);
754 	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) ||
755 	    (bp->b_bufsize <= 0)) {
756 		/*
757 		 * Either a failed I/O or we were asked to free or not
758 		 * cache the buffer.
759 		 */
760 		bp->b_flags |= B_INVAL;
761 		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
762 			(*bioops.io_deallocate)(bp);
763 		if (bp->b_flags & B_DELWRI)
764 			--numdirtybuffers;
765 		bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF);
766 		if ((bp->b_flags & B_VMIO) == 0) {
767 			if (bp->b_bufsize)
768 				allocbuf(bp, 0);
769 			if (bp->b_vp)
770 				brelvp(bp);
771 		}
772 	}
773 
774 	/*
775 	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
776 	 * is called with B_DELWRI set, the underlying pages may wind up
777 	 * getting freed causing a previous write (bdwrite()) to get 'lost'
778 	 * because pages associated with a B_DELWRI bp are marked clean.
779 	 *
780 	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
781 	 * if B_DELWRI is set.
782 	 */
783 
784 	if (bp->b_flags & B_DELWRI)
785 		bp->b_flags &= ~B_RELBUF;
786 
787 	/*
788 	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
789 	 * constituted, not even NFS buffers now.  Two flags affect this.  If
790 	 * B_INVAL, the struct buf is invalidated but the VM object is kept
791 	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
792 	 *
793 	 * If B_ERROR or B_NOCACHE is set, pages in the VM object will be
794 	 * invalidated.  B_ERROR cannot be set for a failed write unless the
795 	 * buffer is also B_INVAL because it hits the re-dirtying code above.
796 	 *
797 	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
798 	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
799 	 * the commit state and we cannot afford to lose the buffer.
800 	 */
801 	if ((bp->b_flags & B_VMIO)
802 	    && !(bp->b_vp->v_tag == VT_NFS &&
803 		 bp->b_vp->v_type != VBLK &&
804 		 (bp->b_flags & B_DELWRI))
805 	    ) {
806 
807 		int i, j, resid;
808 		vm_page_t m;
809 		off_t foff;
810 		vm_pindex_t poff;
811 		vm_object_t obj;
812 		struct vnode *vp;
813 
814 		vp = bp->b_vp;
815 
816 		/*
817 		 * Get the base offset and length of the buffer.  Note that
818 		 * for block sizes that are less than PAGE_SIZE, the b_data
819 		 * base of the buffer does not represent exactly b_offset and
820 		 * neither b_offset nor b_size is necessarily page aligned.
821 		 * Instead, the starting position of b_offset is:
822 		 *
823 		 * 	b_data + (b_offset & PAGE_MASK)
824 		 *
825 		 * block sizes less than DEV_BSIZE (usually 512) are not
826 		 * supported due to the page granularity bits (m->valid,
827 		 * m->dirty, etc...).
828 		 *
829 		 * See man buf(9) for more information
830 		 */
831 
832 		resid = bp->b_bufsize;
833 		foff = bp->b_offset;
834 
835 		for (i = 0; i < bp->b_npages; i++) {
836 			m = bp->b_pages[i];
837 			vm_page_flag_clear(m, PG_ZERO);
838 			if (m == bogus_page) {
839 
840 				obj = (vm_object_t) vp->v_object;
841 				poff = OFF_TO_IDX(bp->b_offset);
842 
843 				for (j = i; j < bp->b_npages; j++) {
844 					m = bp->b_pages[j];
845 					if (m == bogus_page) {
846 						m = vm_page_lookup(obj, poff + j);
847 #if !defined(MAX_PERF)
848 						if (!m) {
849 							panic("brelse: page missing\n");
850 						}
851 #endif
852 						bp->b_pages[j] = m;
853 					}
854 				}
855 
856 				if ((bp->b_flags & B_INVAL) == 0) {
857 					pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
858 				}
859 			}
860 			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
861 				int poffset = foff & PAGE_MASK;
862 				int presid = resid > (PAGE_SIZE - poffset) ?
863 					(PAGE_SIZE - poffset) : resid;
864 
865 				KASSERT(presid >= 0, ("brelse: extra page"));
866 				vm_page_set_invalid(m, poffset, presid);
867 			}
868 			resid -= PAGE_SIZE - (foff & PAGE_MASK);
869 			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
870 		}
871 
872 		if (bp->b_flags & (B_INVAL | B_RELBUF))
873 			vfs_vmio_release(bp);
874 
875 	} else if (bp->b_flags & B_VMIO) {
876 
877 		if (bp->b_flags & (B_INVAL | B_RELBUF))
878 			vfs_vmio_release(bp);
879 
880 	}
881 
882 #if !defined(MAX_PERF)
883 	if (bp->b_qindex != QUEUE_NONE)
884 		panic("brelse: free buffer onto another queue???");
885 #endif
886 	/* enqueue */
887 
888 	/* buffers with no memory */
889 	if (bp->b_bufsize == 0) {
890 		bp->b_flags |= B_INVAL;
891 		bp->b_qindex = QUEUE_EMPTY;
892 		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
893 		LIST_REMOVE(bp, b_hash);
894 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
895 		bp->b_dev = NODEV;
896 		kvafreespace += bp->b_kvasize;
897 		if (bp->b_kvasize)
898 			kvaspacewakeup();
899 	/* buffers with junk contents */
900 	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
901 		bp->b_flags |= B_INVAL;
902 		bp->b_qindex = QUEUE_AGE;
903 		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
904 		LIST_REMOVE(bp, b_hash);
905 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
906 		bp->b_dev = NODEV;
907 
908 	/* buffers that are locked */
909 	} else if (bp->b_flags & B_LOCKED) {
910 		bp->b_qindex = QUEUE_LOCKED;
911 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
912 
913 	/* buffers with stale but valid contents */
914 	} else if (bp->b_flags & B_AGE) {
915 		bp->b_qindex = QUEUE_AGE;
916 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
917 
918 	/* buffers with valid and quite potentially reusable contents */
919 	} else {
920 		bp->b_qindex = QUEUE_LRU;
921 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
922 	}
923 
924 	/*
925 	 * If B_INVAL, clear B_DELWRI.
926 	 */
927 	if ((bp->b_flags & (B_INVAL|B_DELWRI)) == (B_INVAL|B_DELWRI)) {
928 		bp->b_flags &= ~B_DELWRI;
929 		--numdirtybuffers;
930 	}
931 
932 	runningbufspace -= bp->b_bufsize;
933 
934 	/*
935 	 * Fixup numfreebuffers count.  The bp is on an appropriate queue
936 	 * unless locked.  We then bump numfreebuffers if it is not B_DELWRI.
937 	 * We've already handled the B_INVAL case ( B_DELWRI will be clear
938 	 * if B_INVAL is set ).
939 	 */
940 
941 	if ((bp->b_flags & B_LOCKED) == 0 && !(bp->b_flags & B_DELWRI))
942 		bufcountwakeup();
943 
944 	/*
945 	 * Something we can maybe free.
946 	 */
947 
948 	if (bp->b_bufsize)
949 		bufspacewakeup();
950 
951 	if (bp->b_flags & B_WANTED) {
952 		bp->b_flags &= ~(B_WANTED | B_AGE);
953 		wakeup(bp);
954 	}
955 
956 	/* unlock */
957 	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
958 		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
959 	splx(s);
960 }
961 
962 /*
963  * Release a buffer back to the appropriate queue but do not try to free
964  * it.
965  *
966  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
967  * biodone() to requeue an async I/O on completion.  It is also used when
968  * known good buffers need to be requeued but we think we may need the data
969  * again soon.
970  */
971 void
972 bqrelse(struct buf * bp)
973 {
974 	int s;
975 
976 	s = splbio();
977 
978 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
979 
980 #if !defined(MAX_PERF)
981 	if (bp->b_qindex != QUEUE_NONE)
982 		panic("bqrelse: free buffer onto another queue???");
983 #endif
984 	if (bp->b_flags & B_LOCKED) {
985 		bp->b_flags &= ~B_ERROR;
986 		bp->b_qindex = QUEUE_LOCKED;
987 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
988 		/* buffers with stale but valid contents */
989 	} else {
990 		bp->b_qindex = QUEUE_LRU;
991 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
992 	}
993 
994 	runningbufspace -= bp->b_bufsize;
995 
996 	if ((bp->b_flags & B_LOCKED) == 0 &&
997 	    ((bp->b_flags & B_INVAL) || !(bp->b_flags & B_DELWRI))
998 	) {
999 		bufcountwakeup();
1000 	}
1001 
1002 	/*
1003 	 * Something we can maybe wakeup
1004 	 */
1005 	if (bp->b_bufsize)
1006 		bufspacewakeup();
1007 
1008 	/* anyone need this block? */
1009 	if (bp->b_flags & B_WANTED) {
1010 		bp->b_flags &= ~(B_WANTED | B_AGE);
1011 		wakeup(bp);
1012 	}
1013 
1014 	/* unlock */
1015 	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
1016 		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
1017 	splx(s);
1018 }
1019 
1020 static void
1021 vfs_vmio_release(bp)
1022 	struct buf *bp;
1023 {
1024 	int i, s;
1025 	vm_page_t m;
1026 
1027 	s = splvm();
1028 	for (i = 0; i < bp->b_npages; i++) {
1029 		m = bp->b_pages[i];
1030 		bp->b_pages[i] = NULL;
1031 		/*
1032 		 * In order to keep page LRU ordering consistent, put
1033 		 * everything on the inactive queue.
1034 		 */
1035 		vm_page_unwire(m, 0);
1036 		/*
1037 		 * We don't mess with busy pages, it is
1038 		 * the responsibility of the process that
1039 		 * busied the pages to deal with them.
1040 		 */
1041 		if ((m->flags & PG_BUSY) || (m->busy != 0))
1042 			continue;
1043 
1044 		if (m->wire_count == 0) {
1045 			vm_page_flag_clear(m, PG_ZERO);
1046 			/*
1047 			 * Might as well free the page if we can and it has
1048 			 * no valid data.
1049 			 */
1050 			if ((bp->b_flags & B_ASYNC) == 0 && !m->valid && m->hold_count == 0) {
1051 				vm_page_busy(m);
1052 				vm_page_protect(m, VM_PROT_NONE);
1053 				vm_page_free(m);
1054 			}
1055 		}
1056 	}
1057 	bufspace -= bp->b_bufsize;
1058 	vmiospace -= bp->b_bufsize;
1059 	runningbufspace -= bp->b_bufsize;
1060 	splx(s);
1061 	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
1062 	if (bp->b_bufsize)
1063 		bufspacewakeup();
1064 	bp->b_npages = 0;
1065 	bp->b_bufsize = 0;
1066 	bp->b_flags &= ~B_VMIO;
1067 	if (bp->b_vp)
1068 		brelvp(bp);
1069 }
1070 
1071 /*
1072  * Check to see if a block is currently memory resident.
1073  */
1074 struct buf *
1075 gbincore(struct vnode * vp, daddr_t blkno)
1076 {
1077 	struct buf *bp;
1078 	struct bufhashhdr *bh;
1079 
1080 	bh = BUFHASH(vp, blkno);
1081 	bp = bh->lh_first;
1082 
1083 	/* Search hash chain */
1084 	while (bp != NULL) {
1085 		/* hit */
1086 		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
1087 		    (bp->b_flags & B_INVAL) == 0) {
1088 			break;
1089 		}
1090 		bp = bp->b_hash.le_next;
1091 	}
1092 	return (bp);
1093 }
1094 
1095 /*
1096  * this routine implements clustered async writes for
1097  * clearing out B_DELWRI buffers...  This is much better
1098  * than the old way of writing only one buffer at a time.
1099  */
1100 int
1101 vfs_bio_awrite(struct buf * bp)
1102 {
1103 	int i;
1104 	daddr_t lblkno = bp->b_lblkno;
1105 	struct vnode *vp = bp->b_vp;
1106 	int s;
1107 	int ncl;
1108 	struct buf *bpa;
1109 	int nwritten;
1110 	int size;
1111 	int maxcl;
1112 
1113 	s = splbio();
1114 	/*
1115 	 * right now we support clustered writing only to regular files, and
1116 	 * then only if our I/O system is not saturated.
1117 	 */
1118 	if ((vp->v_type == VREG) &&
1119 	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
1120 	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
1121 
1122 		size = vp->v_mount->mnt_stat.f_iosize;
1123 		maxcl = MAXPHYS / size;
1124 
1125 		for (i = 1; i < maxcl; i++) {
1126 			if ((bpa = gbincore(vp, lblkno + i)) &&
1127 			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
1128 			    (B_DELWRI | B_CLUSTEROK)) &&
1129 			    (bpa->b_bufsize == size)) {
1130 				if ((bpa->b_blkno == bpa->b_lblkno) ||
1131 				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
1132 					break;
1133 			} else {
1134 				break;
1135 			}
1136 		}
1137 		ncl = i;
1138 		/*
1139 		 * this is a possible cluster write
1140 		 */
1141 		if (ncl != 1) {
1142 			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
1143 			splx(s);
1144 			return nwritten;
1145 		}
1146 	}
1147 
1148 	bremfree(bp);
1149 	bp->b_flags |= B_BUSY | B_ASYNC;
1150 
1151 	splx(s);
1152 	/*
1153 	 * default (old) behavior, writing out only one block
1154 	 *
1155 	 * XXX returns b_bufsize instead of b_bcount for nwritten?
1156 	 */
1157 	nwritten = bp->b_bufsize;
1158 	(void) VOP_BWRITE(bp->b_vp, bp);
1159 
1160 	return nwritten;
1161 }
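
/*
 * Worked example (not part of the original source), assuming
 * MAXPHYS = 128K: for a regular file with an 8K f_iosize, maxcl is
 * 128K / 8K = 16, so the loop above examines up to 15 following
 * logical blocks.  If blocks lblkno+1 through lblkno+5 are all in
 * core, B_DELWRI | B_CLUSTEROK, and physically contiguous, then
 * ncl = 6 and cluster_wbuild() issues a single 48K write instead of
 * six separate 8K writes.
 */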
1162 
1163 /*
1164  *	getnewbuf:
1165  *
1166  *	Find and initialize a new buffer header, freeing up existing buffers
1167  *	in the bufqueues as necessary.  The new buffer is returned locked.
1168  *
1169  *	Important:  B_INVAL is not set.  If the caller wishes to throw the
1170  *	buffer away, the caller must set B_INVAL prior to calling brelse().
1171  *
1172  *	We block if:
1173  *		We have insufficient buffer headers
1174  *		We have insufficient buffer space
1175  *		buffer_map is too fragmented ( space reservation fails )
1176  *
1177  *	We do *not* attempt to flush dirty buffers more than one level deep.
1178  *	I.e., if P_FLSINPROG is set we do not flush dirty buffers at all.
1179  *
1180  *	If P_FLSINPROG is set, we are allowed to dip into our emergency
1181  *	reserve.
1182  */
1183 static struct buf *
1184 getnewbuf(struct vnode *vp, daddr_t blkno,
1185 	int slpflag, int slptimeo, int size, int maxsize)
1186 {
1187 	struct buf *bp;
1188 	struct buf *nbp;
1189 	struct buf *dbp;
1190 	int outofspace;
1191 	int nqindex;
1192 	int defrag = 0;
1193 	static int newbufcnt = 0;
1194 	int lastnewbuf = newbufcnt;
1195 
1196 restart:
1197 	/*
1198 	 * Calculate whether we are out of buffer space.  This state is
1199 	 * recalculated on every restart.  If we are out of space, we
1200 	 * have to turn off defragmentation.  The outofspace code will
1201 	 * defragment too, but the looping conditionals will be messed up
1202 	 * if both outofspace and defrag are on.
1203 	 */
1204 
1205 	dbp = NULL;
1206 	outofspace = 0;
1207 	if (bufspace >= hibufspace) {
1208 		if ((curproc->p_flag & P_FLSINPROG) == 0 ||
1209 		    bufspace >= maxbufspace
1210 		) {
1211 			outofspace = 1;
1212 			defrag = 0;
1213 		}
1214 	}
1215 
1216 	/*
1217 	 * defrag state is semi-persistent.  1 means we are flagged for
1218 	 * defragging.  -1 means we actually defragged something.
1219 	 */
1220 	/* nop */
1221 
1222 	/*
1223 	 * Setup for scan.  If we do not have enough free buffers,
1224 	 * we setup a degenerate case that falls through the while.
1225 	 *
1226 	 * If we are in the middle of a flush, we can dip into the
1227 	 * emergency reserve.
1228 	 *
1229 	 * If we are out of space, we skip trying to scan QUEUE_EMPTY
1230 	 * because those buffers are, well, empty.
1231 	 */
1232 
1233 	if ((curproc->p_flag & P_FLSINPROG) == 0 &&
1234 	    numfreebuffers < lofreebuffers) {
1235 		nqindex = QUEUE_LRU;
1236 		nbp = NULL;
1237 	} else {
1238 		nqindex = QUEUE_EMPTY;
1239 		if (outofspace ||
1240 		    (nbp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY])) == NULL) {
1241 			nqindex = QUEUE_AGE;
1242 			nbp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1243 			if (nbp == NULL) {
1244 				nqindex = QUEUE_LRU;
1245 				nbp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
1246 			}
1247 		}
1248 	}
1249 
1250 	/*
1251 	 * Run scan, possibly freeing data and/or kva mappings on the fly
1252 	 * depending.
1253 	 */
1254 
1255 	while ((bp = nbp) != NULL) {
1256 		int qindex = nqindex;
1257 		/*
1258 		 * Calculate next bp ( we can only use it if we do not block
1259 		 * or do other fancy things ).
1260 		 */
1261 		if ((nbp = TAILQ_NEXT(bp, b_freelist)) == NULL) {
1262 			switch(qindex) {
1263 			case QUEUE_EMPTY:
1264 				nqindex = QUEUE_AGE;
1265 				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_AGE])))
1266 					break;
1267 				/* fall through */
1268 			case QUEUE_AGE:
1269 				nqindex = QUEUE_LRU;
1270 				if ((nbp = TAILQ_FIRST(&bufqueues[QUEUE_LRU])))
1271 					break;
1272 				/* fall through */
1273 			case QUEUE_LRU:
1274 				/*
1275 				 * nbp is NULL.
1276 				 */
1277 				break;
1278 			}
1279 		}
1280 
1281 		/*
1282 		 * Sanity Checks
1283 		 */
1284 		KASSERT(!(bp->b_flags & B_BUSY), ("getnewbuf: busy buffer %p on free list", bp));
1285 		KASSERT(bp->b_qindex == qindex, ("getnewbuf: inconsistant queue %d bp %p", qindex, bp));
1286 
1287 		/*
1288 		 * Here we try to move NON VMIO buffers to the end of the
1289 		 * LRU queue in order to make VMIO buffers more readily
1290 		 * freeable.  We also try to move buffers with a positive
1291 		 * usecount to the end.
1292 		 *
1293 		 * Note that by moving the bp to the end, we set up a following
1294 		 * loop.  Since we continue to decrement b_usecount this
1295 		 * is ok and, in fact, desirable.
1296 		 *
1297 		 * If we are at the end of the list, we move ourself to the
1298 		 * same place and need to fixup nbp and nqindex to handle
1299 		 * the following case.
1300 		 */
1301 
1302 		if ((qindex == QUEUE_LRU) && bp->b_usecount > 0) {
1303 			if ((bp->b_flags & B_VMIO) == 0 ||
1304 			    (vmiospace < maxvmiobufspace)
1305 			) {
1306 				--bp->b_usecount;
1307 				TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
1308 				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
1309 				if (nbp == NULL) {
1310 					nqindex = qindex;
1311 					nbp = bp;
1312 				}
1313 				continue;
1314 			}
1315 		}
1316 
1317 		/*
1318 		 * If we come across a delayed write and numdirtybuffers should
1319 		 * be flushed, try to write it out.  Only if P_FLSINPROG is
1320 		 * not set.  We can't afford to recursively stack more than
1321 		 * one deep due to the possibility of having deep VFS call
1322 		 * stacks.
1323 		 *
1324 		 * Limit the number of dirty buffers we are willing to try
1325 		 * to recover since it really isn't our job here.
1326 		 */
1327 		if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
1328 			/*
1329 			 * This is rather complex, but necessary.  If we come
1330 			 * across a B_DELWRI buffer we have to flush it in
1331 			 * order to use it.  We only do this if we absolutely
1332 			 * need to.  We must also protect against too much
1333 			 * recursion which might run us out of stack due to
1334 			 * deep VFS call stacks.
1335 			 *
1336 			 * In heavy-writing situations, QUEUE_LRU can contain
1337 			 * a large number of DELWRI buffers at its head.  These
1338 			 * buffers must be moved to the tail if they cannot be
1339 			 * written async in order to reduce the scanning time
1340 			 * required to skip past these buffers in later
1341 			 * getnewbuf() calls.
1342 			 */
1343 			if ((curproc->p_flag & P_FLSINPROG) ||
1344 			    numdirtybuffers < hidirtybuffers) {
1345 				if (qindex == QUEUE_LRU) {
1346 					/*
1347 					 * dbp prevents us from looping forever
1348 					 * if all bps in QUEUE_LRU are dirty.
1349 					 */
1350 					if (bp == dbp) {
1351 						bp = NULL;
1352 						break;
1353 					}
1354 					if (dbp == NULL)
1355 						dbp = TAILQ_LAST(&bufqueues[QUEUE_LRU], bqueues);
1356 					TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
1357 					TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
1358 				}
1359 				continue;
1360 			}
1361 			curproc->p_flag |= P_FLSINPROG;
1362 			vfs_bio_awrite(bp);
1363 			curproc->p_flag &= ~P_FLSINPROG;
1364 			goto restart;
1365 		}
1366 
1367 		if (defrag > 0 && bp->b_kvasize == 0)
1368 			continue;
1369 		if (outofspace > 0 && bp->b_bufsize == 0)
1370 			continue;
1371 
1372 		/*
1373 		 * Start freeing the bp.  This is somewhat involved.  nbp
1374 		 * remains valid only for QUEUE_EMPTY bp's.
1375 		 */
1376 
1377 		bremfree(bp);
1378 		bp->b_flags |= B_BUSY;
1379 
1380 		if (qindex == QUEUE_LRU || qindex == QUEUE_AGE) {
1381 			if (bp->b_flags & B_VMIO) {
1382 				bp->b_flags &= ~B_ASYNC;
1383 				vfs_vmio_release(bp);
1384 			}
1385 			if (bp->b_vp)
1386 				brelvp(bp);
1387 		}
1388 
1389 		if (bp->b_flags & B_WANTED) {
1390 			bp->b_flags &= ~B_WANTED;
1391 			wakeup(bp);
1392 		}
1393 
1394 		/*
1395 		 * NOTE:  nbp is now entirely invalid.  We can only restart
1396 		 * the scan from this point on.
1397 		 *
1398 		 * Get the rest of the buffer freed up.  b_kva* is still
1399 		 * valid after this operation.
1400 		 */
1401 
1402 		if (bp->b_rcred != NOCRED) {
1403 			crfree(bp->b_rcred);
1404 			bp->b_rcred = NOCRED;
1405 		}
1406 		if (bp->b_wcred != NOCRED) {
1407 			crfree(bp->b_wcred);
1408 			bp->b_wcred = NOCRED;
1409 		}
1410 		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
1411 			(*bioops.io_deallocate)(bp);
1412 
1413 		LIST_REMOVE(bp, b_hash);
1414 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
1415 
1416 		if (bp->b_bufsize)
1417 			allocbuf(bp, 0);
1418 
1419 		bp->b_flags = B_BUSY;
1420 		bp->b_dev = NODEV;
1421 		bp->b_vp = NULL;
1422 		bp->b_blkno = bp->b_lblkno = 0;
1423 		bp->b_offset = NOOFFSET;
1424 		bp->b_iodone = 0;
1425 		bp->b_error = 0;
1426 		bp->b_resid = 0;
1427 		bp->b_bcount = 0;
1428 		bp->b_npages = 0;
1429 		bp->b_dirtyoff = bp->b_dirtyend = 0;
1430 		bp->b_usecount = 5;
1431 
1432 		LIST_INIT(&bp->b_dep);
1433 
1434 		/*
1435 		 * Ok, now that we have a free buffer, if we are defragging
1436 		 * we have to recover the kvaspace.
1437 		 */
1438 
1439 		if (defrag > 0) {
1440 			defrag = -1;
1441 			bp->b_flags |= B_INVAL;
1442 			bfreekva(bp);
1443 			brelse(bp);
1444 			goto restart;
1445 		}
1446 
1447 		if (outofspace > 0) {
1448 			outofspace = -1;
1449 			bp->b_flags |= B_INVAL;
1450 			bfreekva(bp);
1451 			brelse(bp);
1452 			goto restart;
1453 		}
1454 
1455 		/*
1456 		 * We are done
1457 		 */
1458 		break;
1459 	}
1460 
1461 	/*
1462 	 * If we exhausted our list, sleep as appropriate.
1463 	 */
1464 
1465 	if (bp == NULL) {
1466 		int flags;
1467 
1468 dosleep:
1469 		if (defrag > 0)
1470 			flags = VFS_BIO_NEED_KVASPACE;
1471 		else if (outofspace > 0)
1472 			flags = VFS_BIO_NEED_BUFSPACE;
1473 		else
1474 			flags = VFS_BIO_NEED_ANY;
1475 
1476 		(void) speedup_syncer();
1477 		needsbuffer |= flags;
1478 		while (needsbuffer & flags) {
1479 			if (tsleep(&needsbuffer, (PRIBIO + 4) | slpflag,
1480 			    "newbuf", slptimeo))
1481 				return (NULL);
1482 		}
1483 	} else {
1484 		/*
1485 		 * We finally have a valid bp.  We aren't quite out of the
1486 		 * woods; we still have to reserve kva space.
1487 		 */
1488 		vm_offset_t addr = 0;
1489 
1490 		maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;
1491 
1492 		if (maxsize != bp->b_kvasize) {
1493 			bfreekva(bp);
1494 
1495 			if (vm_map_findspace(buffer_map,
1496 				vm_map_min(buffer_map), maxsize, &addr)
1497 			) {
1498 				/*
1499 				 * Uh oh.  Buffer map is too fragmented.  Try
1500 				 * to defragment.
1501 				 */
1502 				if (defrag <= 0) {
1503 					defrag = 1;
1504 					bp->b_flags |= B_INVAL;
1505 					brelse(bp);
1506 					goto restart;
1507 				}
1508 				/*
1509 				 * Uh oh.  We couldn't seem to defragment
1510 				 */
1511 				bp = NULL;
1512 				goto dosleep;
1513 			}
1514 		}
1515 		if (addr) {
1516 			vm_map_insert(buffer_map, NULL, 0,
1517 				addr, addr + maxsize,
1518 				VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
1519 
1520 			bp->b_kvabase = (caddr_t) addr;
1521 			bp->b_kvasize = maxsize;
1522 		}
1523 		bp->b_data = bp->b_kvabase;
1524 	}
1525 
1526 	/*
1527 	 * If we have slept at some point in this process and another
1528 	 * process has managed to allocate a new buffer while we slept,
1529 	 * we have to return NULL so that our caller can recheck to
1530 	 * ensure that the other process did not create a buffer with the
1531 	 * same identity as the one we were requesting.  We make this
1532 	 * check by incrementing the static int newbufcnt each time we
1533 	 * successfully allocate a new buffer. By saving the value of
1534 	 * newbufcnt in our local lastnewbuf, we can compare newbufcnt
1535 	 * with lastnewbuf to see if any other process managed to
1536 	 * allocate a buffer while we were doing so ourselves.
1537 	 *
1538 	 * Note that bp, if valid, is locked.
1539 	 */
1540 	if (lastnewbuf == newbufcnt) {
1541 		/*
1542 		 * No buffers allocated, so we can return one if we were
1543 		 * successful, or continue trying if we were not successful.
1544 		 */
1545 		if (bp != NULL) {
1546 			newbufcnt += 1;
1547 			return (bp);
1548 		}
1549 		goto restart;
1550 	}
1551 	/*
1552 	 * Another process allocated a buffer since we were called, so
1553 	 * we have to free the one we allocated and return NULL to let
1554 	 * our caller recheck to see if a new buffer is still needed.
1555 	 */
1556 	if (bp != NULL) {
1557 		bp->b_flags |= B_INVAL;
1558 		brelse(bp);
1559 	}
1560 	return (NULL);
1561 }
1562 
1563 /*
1564  *	waitfreebuffers:
1565  *
1566  *	Wait for sufficient free buffers.  This routine is not called if
1567  *	curproc is the update process so we do not have to do anything
1568  *	fancy.
1569  */
1570 
1571 static void
1572 waitfreebuffers(int slpflag, int slptimeo)
1573 {
1574 	while (numfreebuffers < hifreebuffers) {
1575 		flushdirtybuffers(slpflag, slptimeo);
1576 		if (numfreebuffers >= hifreebuffers)
1577 			break;
1578 		needsbuffer |= VFS_BIO_NEED_FREE;
1579 		if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo))
1580 			break;
1581 	}
1582 }
1583 
1584 /*
1585  *	flushdirtybuffers:
1586  *
1587  *	This routine is called when we get too many dirty buffers.
1588  *
1589  *	We have to protect ourselves from recursion, but we also do not want
1590  *	another process's flushdirtybuffers() to interfere with the syncer if
1591  *	it decides to flushdirtybuffers().
1592  *
1593  *	In order to maximize operations, we allow any process to flush
1594  *	dirty buffers and use P_FLSINPROG to prevent recursion.
1595  */
1596 
1597 static void
1598 flushdirtybuffers(int slpflag, int slptimeo)
1599 {
1600 	int s;
1601 
1602 	s = splbio();
1603 
1604 	if (curproc->p_flag & P_FLSINPROG) {
1605 		splx(s);
1606 		return;
1607 	}
1608 	curproc->p_flag |= P_FLSINPROG;
1609 
1610 	while (numdirtybuffers > lodirtybuffers) {
1611 		if (flushbufqueues() == 0)
1612 			break;
1613 	}
1614 
1615 	curproc->p_flag &= ~P_FLSINPROG;
1616 
1617 	splx(s);
1618 }
1619 
1620 static int
1621 flushbufqueues(void)
1622 {
1623 	struct buf *bp;
1624 	int qindex;
1625 	int r = 0;
1626 
1627 	qindex = QUEUE_AGE;
1628 	bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
1629 
1630 	for (;;) {
1631 		if (bp == NULL) {
1632 			if (qindex == QUEUE_LRU)
1633 				break;
1634 			qindex = QUEUE_LRU;
1635 			if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU])) == NULL)
1636 				break;
1637 		}
1638 
1639 		/*
1640 		 * Try to free up B_INVAL delayed-write buffers rather than
1641 		 * writing them out.  Note also that NFS is somewhat sensitive
1642 		 * to B_INVAL buffers so it is doubly important that we do
1643 		 * this.
1644 		 */
1645 		if ((bp->b_flags & B_DELWRI) != 0) {
1646 			if (bp->b_flags & B_INVAL) {
1647 				bremfree(bp);
1648 				bp->b_flags |= B_BUSY;
1649 				brelse(bp);
1650 			} else {
1651 				vfs_bio_awrite(bp);
1652 			}
1653 			++r;
1654 			break;
1655 		}
1656 		bp = TAILQ_NEXT(bp, b_freelist);
1657 	}
1658 	return(r);
1659 }
1660 
1661 /*
1662  * Check to see if a block is currently memory resident.
1663  */
1664 struct buf *
1665 incore(struct vnode * vp, daddr_t blkno)
1666 {
1667 	struct buf *bp;
1668 
1669 	int s = splbio();
1670 	bp = gbincore(vp, blkno);
1671 	splx(s);
1672 	return (bp);
1673 }
1674 
1675 /*
1676  * Returns true if no I/O is needed to access the
1677  * associated VM object.  This is like incore except
1678  * it also hunts around in the VM system for the data.
1679  */
1680 
1681 int
1682 inmem(struct vnode * vp, daddr_t blkno)
1683 {
1684 	vm_object_t obj;
1685 	vm_offset_t toff, tinc, size;
1686 	vm_page_t m;
1687 	vm_ooffset_t off;
1688 
1689 	if (incore(vp, blkno))
1690 		return 1;
1691 	if (vp->v_mount == NULL)
1692 		return 0;
1693 	if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0)
1694 		return 0;
1695 
1696 	obj = vp->v_object;
1697 	size = PAGE_SIZE;
1698 	if (size > vp->v_mount->mnt_stat.f_iosize)
1699 		size = vp->v_mount->mnt_stat.f_iosize;
1700 	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
1701 
1702 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
1703 		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
1704 		if (!m)
1705 			return 0;
1706 		tinc = size;
1707 		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
1708 			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
1709 		if (vm_page_is_valid(m,
1710 		    (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
1711 			return 0;
1712 	}
1713 	return 1;
1714 }
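
/*
 * Worked example (not part of the original source), assuming 4K
 * pages: on a filesystem with an 8K f_iosize, inmem(vp, 10) computes
 * off = 10 * 8192 = 81920 and requires the pages at indices 20 and 21
 * of vp->v_object to be present and entirely valid (tinc is 4096 on
 * each pass) before reporting the block as memory resident.
 */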
1715 
1716 /*
1717  *	vfs_setdirty:
1718  *
1719  *	Sets the dirty range for a buffer based on the status of the dirty
1720  *	bits in the pages comprising the buffer.
1721  *
1722  *	The range is limited to the size of the buffer.
1723  *
1724  *	This routine is primarily used by NFS, but is generalized for the
1725  *	B_VMIO case.
1726  */
1727 static void
1728 vfs_setdirty(struct buf *bp)
1729 {
1730 	int i;
1731 	vm_object_t object;
1732 
1733 	/*
1734 	 * Degenerate case - empty buffer
1735 	 */
1736 
1737 	if (bp->b_bufsize == 0)
1738 		return;
1739 
1740 	/*
1741 	 * We qualify the scan for modified pages on whether the
1742 	 * object has been flushed yet.  The OBJ_WRITEABLE flag
1743 	 * is not cleared simply by protecting pages off.
1744 	 */
1745 
1746 	if ((bp->b_flags & B_VMIO) == 0)
1747 		return;
1748 
1749 	object = bp->b_pages[0]->object;
1750 
1751 	if ((object->flags & OBJ_WRITEABLE) && !(object->flags & OBJ_MIGHTBEDIRTY))
1752 		printf("Warning: object %p writeable but not mightbedirty\n", object);
1753 	if (!(object->flags & OBJ_WRITEABLE) && (object->flags & OBJ_MIGHTBEDIRTY))
1754 		printf("Warning: object %p mightbedirty but not writeable\n", object);
1755 
1756 	if (object->flags & (OBJ_MIGHTBEDIRTY|OBJ_CLEANING)) {
1757 		vm_offset_t boffset;
1758 		vm_offset_t eoffset;
1759 
1760 		/*
1761 		 * test the pages to see if they have been modified directly
1762 		 * by users through the VM system.
1763 		 */
1764 		for (i = 0; i < bp->b_npages; i++) {
1765 			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
1766 			vm_page_test_dirty(bp->b_pages[i]);
1767 		}
1768 
1769 		/*
1770 		 * Calculate the encompassing dirty range, boffset and eoffset,
1771 		 * (eoffset - boffset) bytes.
1772 		 */
1773 
1774 		for (i = 0; i < bp->b_npages; i++) {
1775 			if (bp->b_pages[i]->dirty)
1776 				break;
1777 		}
1778 		boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
1779 
1780 		for (i = bp->b_npages - 1; i >= 0; --i) {
1781 			if (bp->b_pages[i]->dirty) {
1782 				break;
1783 			}
1784 		}
1785 		eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
1786 
1787 		/*
1788 		 * Fit it to the buffer.
1789 		 */
1790 
1791 		if (eoffset > bp->b_bcount)
1792 			eoffset = bp->b_bcount;
1793 
1794 		/*
1795 		 * If we have a good dirty range, merge with the existing
1796 		 * dirty range.
1797 		 */
1798 
1799 		if (boffset < eoffset) {
1800 			if (bp->b_dirtyoff > boffset)
1801 				bp->b_dirtyoff = boffset;
1802 			if (bp->b_dirtyend < eoffset)
1803 				bp->b_dirtyend = eoffset;
1804 		}
1805 	}
1806 }
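
/*
 * Worked example (not part of the original source), assuming 4K pages
 * and a page-aligned b_offset: for an 8K VMIO buffer in which only the
 * second page has dirty bits set, the scans above compute
 *
 *	boffset = (1 << PAGE_SHIFT) - 0 = 4096
 *	eoffset = (2 << PAGE_SHIFT) - 0 = 8192
 *
 * so the merged dirty range covers at least bytes 4096-8191 of the
 * buffer (eoffset is clipped to b_bcount first).
 */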
1807 
1808 /*
1809  *	getblk:
1810  *
1811  *	Get a block given a specified block and offset into a file/device.
1812  *	The buffer's B_DONE bit will be cleared on return, making it almost
1813  * 	ready for an I/O initiation.  B_INVAL may or may not be set on
1814  *	return.  The caller should clear B_INVAL prior to initiating a
1815  *	READ.
1816  *
1817  *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
1818  *	an existing buffer.
1819  *
1820  *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
1821  *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
1822  *	and then cleared based on the backing VM.  If the previous buffer is
1823  *	non-0-sized but invalid, B_CACHE will be cleared.
1824  *
1825  *	If getblk() must create a new buffer, the new buffer is returned with
1826  *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
1827  *	case it is returned with B_INVAL clear and B_CACHE set based on the
1828  *	backing VM.
1829  *
1830  *	getblk() also forces a VOP_BWRITE() for any B_DELWRI buffer whose
1831  *	B_CACHE bit is clear.
1832  *
1833  *	What this means, basically, is that the caller should use B_CACHE to
1834  *	determine whether the buffer is fully valid or not and should clear
1835  *	B_INVAL prior to issuing a read.  If the caller intends to validate
1836  *	the buffer by loading its data area with something, the caller needs
1837  *	to clear B_INVAL.  If the caller does this without issuing an I/O,
1838  *	the caller should set B_CACHE ( as an optimization ), else the caller
1839  *	should issue the I/O and biodone() will set B_CACHE if the I/O was
1840  *	a write attempt or if it was a successful read.  If the caller
1841  *	intends to issue a READ, the caller must clear B_INVAL and B_ERROR
1842  *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
1843  */
1844 struct buf *
1845 getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
1846 {
1847 	struct buf *bp;
1848 	int s;
1849 	struct bufhashhdr *bh;
1850 
1851 #if !defined(MAX_PERF)
1852 	if (size > MAXBSIZE)
1853 		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
1854 #endif
1855 
1856 	s = splbio();
1857 loop:
1858 	/*
1859 	 * Block if we are low on buffers.  The syncer is allowed more
1860 	 * buffers in order to avoid a deadlock.
1861 	 */
1862 	if (curproc == updateproc && numfreebuffers == 0) {
1863 		needsbuffer |= VFS_BIO_NEED_ANY;
1864 		tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf",
1865 		    slptimeo);
1866 	} else if (curproc != updateproc && numfreebuffers < lofreebuffers) {
1867 		waitfreebuffers(slpflag, slptimeo);
1868 	}
1869 
1870 	if ((bp = gbincore(vp, blkno))) {
1871 		/*
1872 		 * Buffer is in-core
1873 		 */
1874 
1875 		if (bp->b_flags & B_BUSY) {
1876 			bp->b_flags |= B_WANTED;
1877 			if (bp->b_usecount < BUF_MAXUSE)
1878 				++bp->b_usecount;
1879 
1880 			if (!tsleep(bp,
1881 				(PRIBIO + 4) | slpflag, "getblk", slptimeo)) {
1882 				goto loop;
1883 			}
1884 
1885 			splx(s);
1886 			return (struct buf *) NULL;
1887 		}
1888 
1889 		/*
1890 		 * Busy the buffer.  B_CACHE is cleared if the buffer is
1891 		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
1892 		 * and for a VMIO buffer B_CACHE is adjusted according to the
1893 		 * backing VM cache.
1894 		 */
1895 		bp->b_flags |= B_BUSY;
1896 		if (bp->b_flags & B_INVAL)
1897 			bp->b_flags &= ~B_CACHE;
1898 		else if ((bp->b_flags & (B_VMIO|B_INVAL)) == 0)
1899 			bp->b_flags |= B_CACHE;
1900 		bremfree(bp);
1901 
1902 		/*
1903 		 * check for size inconsistencies for the non-VMIO case.
1904 		 */
1905 
1906 		if (bp->b_bcount != size) {
1907 			if ((bp->b_flags & B_VMIO) == 0 ||
1908 			    (size > bp->b_kvasize)
1909 			) {
1910 				if (bp->b_flags & B_DELWRI) {
1911 					bp->b_flags |= B_NOCACHE;
1912 					VOP_BWRITE(bp->b_vp, bp);
1913 				} else {
1914 					if ((bp->b_flags & B_VMIO) &&
1915 					   (LIST_FIRST(&bp->b_dep) == NULL)) {
1916 						bp->b_flags |= B_RELBUF;
1917 						brelse(bp);
1918 					} else {
1919 						bp->b_flags |= B_NOCACHE;
1920 						VOP_BWRITE(bp->b_vp, bp);
1921 					}
1922 				}
1923 				goto loop;
1924 			}
1925 		}
1926 
1927 		/*
1928 		 * If the size is inconsistent in the VMIO case, we can resize
1929 		 * the buffer.  This might lead to B_CACHE getting set or
1930 		 * cleared.  If the size has not changed, B_CACHE remains
1931 		 * unchanged from its previous state.
1932 		 */
1933 
1934 		if (bp->b_bcount != size)
1935 			allocbuf(bp, size);
1936 
1937 		KASSERT(bp->b_offset != NOOFFSET,
1938 		    ("getblk: no buffer offset"));
1939 
1940 		/*
1941 		 * A buffer with B_DELWRI set and B_CACHE clear must
1942 		 * be committed before we can return the buffer in
1943 		 * order to prevent the caller from issuing a read
1944 		 * ( due to B_CACHE not being set ) and overwriting
1945 		 * it.
1946 		 *
1947 		 * Most callers, including NFS and FFS, need this to
1948 		 * operate properly either because they assume they
1949 		 * can issue a read if B_CACHE is not set, or because
1950 		 * ( for example ) an uncached B_DELWRI might loop due
1951 		 * to softupdates re-dirtying the buffer.  In the latter
1952 		 * case, B_CACHE is set after the first write completes,
1953 		 * preventing further loops.
1954 		 */
1955 
1956 		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
1957 			VOP_BWRITE(bp->b_vp, bp);
1958 			goto loop;
1959 		}
1960 
1961 		if (bp->b_usecount < BUF_MAXUSE)
1962 			++bp->b_usecount;
1963 		splx(s);
1964 		bp->b_flags &= ~B_DONE;
1965 	} else {
1966 		/*
1967 		 * Buffer is not in-core, create new buffer.  The buffer
1968 		 * returned by getnewbuf() is marked B_BUSY.  Note that the
1969 		 * returned buffer is also considered valid ( not marked
1970 		 * B_INVAL ).
1971 		 */
1972 		int bsize, maxsize, vmio;
1973 		off_t offset;
1974 
1975 		if (vp->v_type == VBLK)
1976 			bsize = DEV_BSIZE;
1977 		else if (vp->v_mountedhere)
1978 			bsize = vp->v_mountedhere->mnt_stat.f_iosize;
1979 		else if (vp->v_mount)
1980 			bsize = vp->v_mount->mnt_stat.f_iosize;
1981 		else
1982 			bsize = size;
1983 
1984 		offset = (off_t)blkno * bsize;
1985 		vmio = (vp->v_object != 0) && (vp->v_flag & VOBJBUF);
1986 		maxsize = vmio ? size + (offset & PAGE_MASK) : size;
1987 		maxsize = imax(maxsize, bsize);
1988 
1989 		if ((bp = getnewbuf(vp, blkno,
1990 			slpflag, slptimeo, size, maxsize)) == NULL) {
1991 			if (slpflag || slptimeo) {
1992 				splx(s);
1993 				return NULL;
1994 			}
1995 			goto loop;
1996 		}
1997 
1998 		/*
1999 		 * This code is used to make sure that a buffer is not
2000 		 * created while the getnewbuf routine is blocked.
2001 		 * This can be a problem whether the vnode is locked or not.
2002 		 * If the buffer is created out from under us, we have to
2003 		 * throw away the one we just created.  There is no window for a
2004 		 * race because we are safely running at splbio() from the
2005 		 * point of the duplicate buffer creation through to here.
2006 		 */
2007 		if (gbincore(vp, blkno)) {
2008 			bp->b_flags |= B_INVAL;
2009 			brelse(bp);
2010 			goto loop;
2011 		}
2012 
2013 		/*
2014 		 * Insert the buffer into the hash, so that it can
2015 		 * be found by incore.
2016 		 */
2017 		bp->b_blkno = bp->b_lblkno = blkno;
2018 		bp->b_offset = offset;
2019 
2020 		bgetvp(vp, bp);
2021 		LIST_REMOVE(bp, b_hash);
2022 		bh = BUFHASH(vp, blkno);
2023 		LIST_INSERT_HEAD(bh, bp, b_hash);
2024 
2025 		/*
2026 		 * Set the B_VMIO bit and use allocbuf() to grow the buffer.  Since the
2027 		 * buffer size starts out as 0, B_CACHE will be set by
2028 		 * allocbuf() for the VMIO case prior to it testing the
2029 		 * backing store for validity.
2030 		 */
2031 
2032 		if (vmio) {
2033 			bp->b_flags |= B_VMIO;
2034 #if defined(VFS_BIO_DEBUG)
2035 			if (vp->v_type != VREG && vp->v_type != VBLK)
2036 				printf("getblk: vmioing file type %d???\n", vp->v_type);
2037 #endif
2038 		} else {
2039 			bp->b_flags &= ~B_VMIO;
2040 		}
2041 
2042 		allocbuf(bp, size);
2043 
2044 		splx(s);
2045 		bp->b_flags &= ~B_DONE;
2046 	}
2047 	return (bp);
2048 }
2049 
2050 /*
2051  * Get an empty, disassociated buffer of given size.  The buffer is initially
2052  * set to B_INVAL.
2053  */
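/*
 * Illustrative usage (a sketch): callers typically use geteblk() for
 * scratch buffers that are never looked up through the cache, releasing
 * them with B_INVAL still set so brelse() destroys them:
 *
 *	bp = geteblk(size);
 *	... use bp->b_data as scratch space ...
 *	bp->b_flags |= B_INVAL;
 *	brelse(bp);
 */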
2054 struct buf *
2055 geteblk(int size)
2056 {
2057 	struct buf *bp;
2058 	int s;
2059 
2060 	s = splbio();
2061 	while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0);
2062 	splx(s);
2063 	allocbuf(bp, size);
2064 	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
2065 	return (bp);
2066 }
2067 
2068 
2069 /*
2070  * This code constructs the buffer's memory from either anonymous system
2071  * memory (in the case of non-VMIO operations) or from an associated
2072  * VM object (in the case of VMIO operations).  This code is able to
2073  * resize a buffer up or down.
2074  *
2075  * Note that this code is tricky, and has many complications to resolve
2076  * deadlock or inconsistent data situations.  Tread lightly!!!
2077  * There are B_CACHE and B_DELWRI interactions that must be dealt with by
2078  * the caller.  Calling this code willy nilly can result in the loss of data.
2079  *
2080  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
2081  * B_CACHE for the non-VMIO case.
2082  */
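/*
 * A worked example of the VMIO sizing arithmetic below (assuming a 4K
 * page size and a 512-byte DEV_BSIZE):  with size == 6144 and
 * (b_offset & PAGE_MASK) == 1024,
 *
 *	newbsize     = (6144 + 511) & ~511    = 6144
 *	desiredpages = num_pages(1024 + 6144) = 2
 *
 * so the buffer maps two pages even though it is only a page and a half
 * long, because it does not start on a page boundary.
 */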
2083 
2084 int
2085 allocbuf(struct buf *bp, int size)
2086 {
2087 	int newbsize, mbsize;
2088 	int i;
2089 
2090 #if !defined(MAX_PERF)
2091 	if (!(bp->b_flags & B_BUSY))
2092 		panic("allocbuf: buffer not busy");
2093 
2094 	if (bp->b_kvasize < size)
2095 		panic("allocbuf: buffer too small");
2096 #endif
2097 
2098 	if ((bp->b_flags & B_VMIO) == 0) {
2099 		caddr_t origbuf;
2100 		int origbufsize;
2101 		/*
2102 		 * Just get anonymous memory from the kernel.  Don't
2103 		 * mess with B_CACHE.
2104 		 */
2105 		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2106 #if !defined(NO_B_MALLOC)
2107 		if (bp->b_flags & B_MALLOC)
2108 			newbsize = mbsize;
2109 		else
2110 #endif
2111 			newbsize = round_page(size);
2112 
2113 		if (newbsize < bp->b_bufsize) {
2114 #if !defined(NO_B_MALLOC)
2115 			/*
2116 			 * malloced buffers are not shrunk
2117 			 */
2118 			if (bp->b_flags & B_MALLOC) {
2119 				if (newbsize) {
2120 					bp->b_bcount = size;
2121 				} else {
2122 					free(bp->b_data, M_BIOBUF);
2123 					bufspace -= bp->b_bufsize;
2124 					bufmallocspace -= bp->b_bufsize;
2125 					runningbufspace -= bp->b_bufsize;
2126 					if (bp->b_bufsize)
2127 						bufspacewakeup();
2128 					bp->b_data = bp->b_kvabase;
2129 					bp->b_bufsize = 0;
2130 					bp->b_bcount = 0;
2131 					bp->b_flags &= ~B_MALLOC;
2132 				}
2133 				return 1;
2134 			}
2135 #endif
2136 			vm_hold_free_pages(
2137 			    bp,
2138 			    (vm_offset_t) bp->b_data + newbsize,
2139 			    (vm_offset_t) bp->b_data + bp->b_bufsize);
2140 		} else if (newbsize > bp->b_bufsize) {
2141 #if !defined(NO_B_MALLOC)
2142 			/*
2143 			 * We only use malloced memory on the first allocation,
2144 			 * and revert to page-allocated memory when the buffer grows.
2145 			 */
2146 			if ( (bufmallocspace < maxbufmallocspace) &&
2147 				(bp->b_bufsize == 0) &&
2148 				(mbsize <= PAGE_SIZE/2)) {
2149 
2150 				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
2151 				bp->b_bufsize = mbsize;
2152 				bp->b_bcount = size;
2153 				bp->b_flags |= B_MALLOC;
2154 				bufspace += mbsize;
2155 				bufmallocspace += mbsize;
2156 				runningbufspace += bp->b_bufsize;
2157 				return 1;
2158 			}
2159 #endif
2160 			origbuf = NULL;
2161 			origbufsize = 0;
2162 #if !defined(NO_B_MALLOC)
2163 			/*
2164 			 * If the buffer is growing on its other-than-first allocation,
2165 			 * then we revert to the page-allocation scheme.
2166 			 */
2167 			if (bp->b_flags & B_MALLOC) {
2168 				origbuf = bp->b_data;
2169 				origbufsize = bp->b_bufsize;
2170 				bp->b_data = bp->b_kvabase;
2171 				bufspace -= bp->b_bufsize;
2172 				bufmallocspace -= bp->b_bufsize;
2173 				runningbufspace -= bp->b_bufsize;
2174 				if (bp->b_bufsize)
2175 					bufspacewakeup();
2176 				bp->b_bufsize = 0;
2177 				bp->b_flags &= ~B_MALLOC;
2178 				newbsize = round_page(newbsize);
2179 			}
2180 #endif
2181 			vm_hold_load_pages(
2182 			    bp,
2183 			    (vm_offset_t) bp->b_data + bp->b_bufsize,
2184 			    (vm_offset_t) bp->b_data + newbsize);
2185 #if !defined(NO_B_MALLOC)
2186 			if (origbuf) {
2187 				bcopy(origbuf, bp->b_data, origbufsize);
2188 				free(origbuf, M_BIOBUF);
2189 			}
2190 #endif
2191 		}
2192 	} else {
2193 		vm_page_t m;
2194 		int desiredpages;
2195 
2196 		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
2197 		desiredpages = (size == 0) ? 0 :
2198 			num_pages((bp->b_offset & PAGE_MASK) + newbsize);
2199 
2200 #if !defined(NO_B_MALLOC)
2201 		if (bp->b_flags & B_MALLOC)
2202 			panic("allocbuf: VMIO buffer can't be malloced");
2203 #endif
2204 		/*
2205 		 * Set B_CACHE initially if buffer is 0 length or will become
2206 		 * 0-length.
2207 		 */
2208 		if (size == 0 || bp->b_bufsize == 0)
2209 			bp->b_flags |= B_CACHE;
2210 
2211 		if (newbsize < bp->b_bufsize) {
2212 			/*
2213 			 * DEV_BSIZE aligned new buffer size is less than the
2214 			 * DEV_BSIZE aligned existing buffer size.  Figure out
2215 			 * if we have to remove any pages.
2216 			 */
2217 			if (desiredpages < bp->b_npages) {
2218 				for (i = desiredpages; i < bp->b_npages; i++) {
2219 					/*
2220 					 * the page is not freed here -- it
2221 					 * is the responsibility of
2222 					 * vnode_pager_setsize
2223 					 */
2224 					m = bp->b_pages[i];
2225 					KASSERT(m != bogus_page,
2226 					    ("allocbuf: bogus page found"));
2227 					while (vm_page_sleep_busy(m, TRUE, "biodep"))
2228 						;
2229 
2230 					bp->b_pages[i] = NULL;
2231 					vm_page_unwire(m, 0);
2232 				}
2233 				pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) +
2234 				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
2235 				bp->b_npages = desiredpages;
2236 			}
2237 		} else if (size > bp->b_bcount) {
2238 			/*
2239 			 * We are growing the buffer, possibly in a
2240 			 * byte-granular fashion.
2241 			 */
2242 			struct vnode *vp;
2243 			vm_object_t obj;
2244 			vm_offset_t toff;
2245 			vm_offset_t tinc;
2246 
2247 			/*
2248 			 * Step 1, bring in the VM pages from the object,
2249 			 * allocating them if necessary.  We must clear
2250 			 * B_CACHE if these pages are not valid for the
2251 			 * range covered by the buffer.
2252 			 */
2253 
2254 			vp = bp->b_vp;
2255 			obj = vp->v_object;
2256 
2257 			while (bp->b_npages < desiredpages) {
2258 				vm_page_t m;
2259 				vm_pindex_t pi;
2260 
2261 				pi = OFF_TO_IDX(bp->b_offset) + bp->b_npages;
2262 				if ((m = vm_page_lookup(obj, pi)) == NULL) {
2263 					m = vm_page_alloc(obj, pi, VM_ALLOC_NORMAL);
2264 					if (m == NULL) {
2265 						VM_WAIT;
2266 						vm_pageout_deficit += desiredpages - bp->b_npages;
2267 					} else {
2268 						vm_page_wire(m);
2269 						vm_page_wakeup(m);
2270 						bp->b_flags &= ~B_CACHE;
2271 						bp->b_pages[bp->b_npages] = m;
2272 						++bp->b_npages;
2273 					}
2274 					continue;
2275 				}
2276 
2277 				/*
2278 				 * We found a page.  If we have to sleep on it,
2279 				 * retry because it might have gotten freed out
2280 				 * from under us.
2281 				 *
2282 				 * We can only test PG_BUSY here.  Blocking on
2283 				 * m->busy might lead to a deadlock:
2284 				 *
2285 				 *  vm_fault->getpages->cluster_read->allocbuf
2286 				 *
2287 				 */
2288 
2289 				if (vm_page_sleep_busy(m, FALSE, "pgtblk"))
2290 					continue;
2291 
2292 				/*
2293 				 * We have a good page.  Should we wakeup the
2294 				 * page daemon?
2295 				 */
2296 				if ((curproc != pageproc) &&
2297 				    ((m->queue - m->pc) == PQ_CACHE) &&
2298 				    ((cnt.v_free_count + cnt.v_cache_count) <
2299 					(cnt.v_free_min + cnt.v_cache_min))
2300 				) {
2301 					pagedaemon_wakeup();
2302 				}
2303 				vm_page_flag_clear(m, PG_ZERO);
2304 				vm_page_wire(m);
2305 				bp->b_pages[bp->b_npages] = m;
2306 				++bp->b_npages;
2307 			}
2308 
2309 			/*
2310 			 * Step 2.  We've loaded the pages into the buffer,
2311 			 * we have to figure out if we can still have B_CACHE
2312 			 * set.  Note that B_CACHE is set according to the
2313 			 * byte-granular range ( bcount and size ), not the
2314 			 * aligned range ( newbsize ).
2315 			 *
2316 			 * The VM test is against m->valid, which is DEV_BSIZE
2317 			 * aligned.  Needless to say, the validity of the data
2318 			 * needs to also be DEV_BSIZE aligned.  Note that this
2319 			 * fails with NFS if the server or some other client
2320 			 * extends the file's EOF.  If our buffer is resized,
2321 			 * B_CACHE may remain set! XXX
2322 			 */
2323 
2324 			toff = bp->b_bcount;
2325 			tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
2326 
2327 			while ((bp->b_flags & B_CACHE) && toff < size) {
2328 				vm_pindex_t pi;
2329 
2330 				if (tinc > (size - toff))
2331 					tinc = size - toff;
2332 
2333 				pi = ((bp->b_offset & PAGE_MASK) + toff) >>
2334 				    PAGE_SHIFT;
2335 
2336 				vfs_buf_test_cache(
2337 				    bp,
2338 				    bp->b_offset,
2339 				    toff,
2340 				    tinc,
2341 				    bp->b_pages[pi]
2342 				);
2343 				toff += tinc;
2344 				tinc = PAGE_SIZE;
2345 			}
2346 
2347 			/*
2348 			 * Step 3, fixup the KVM pmap.  Remember that
2349 			 * bp->b_data is relative to bp->b_offset, but
2350 			 * bp->b_offset may be offset into the first page.
2351 			 */
2352 
2353 			bp->b_data = (caddr_t)
2354 			    trunc_page((vm_offset_t)bp->b_data);
2355 			pmap_qenter(
2356 			    (vm_offset_t)bp->b_data,
2357 			    bp->b_pages,
2358 			    bp->b_npages
2359 			);
2360 			bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
2361 			    (vm_offset_t)(bp->b_offset & PAGE_MASK));
2362 		}
2363 	}
2364 	if (bp->b_flags & B_VMIO)
2365 		vmiospace += (newbsize - bp->b_bufsize);
2366 	bufspace += (newbsize - bp->b_bufsize);
2367 	runningbufspace += (newbsize - bp->b_bufsize);
2368 	if (newbsize < bp->b_bufsize)
2369 		bufspacewakeup();
2370 	bp->b_bufsize = newbsize;	/* actual buffer allocation	*/
2371 	bp->b_bcount = size;		/* requested buffer size	*/
2372 	return 1;
2373 }
2374 
2375 /*
2376  *	biowait:
2377  *
2378  *	Wait for buffer I/O completion, returning error status.  The buffer
2379  *	is left B_BUSY|B_DONE on return.  B_EINTR is converted into an EINTR
2380  *	error and cleared.
2381  */
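/*
 *	A minimal synchronous caller (sketch) issues the I/O and then waits,
 *	releasing the buffer on failure:
 *
 *		VOP_STRATEGY(vp, bp);
 *		error = biowait(bp);
 *		if (error) {
 *			brelse(bp);
 *			return (error);
 *		}
 */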
2382 int
2383 biowait(register struct buf * bp)
2384 {
2385 	int s;
2386 
2387 	s = splbio();
2388 	while ((bp->b_flags & B_DONE) == 0)
2389 #if defined(NO_SCHEDULE_MODS)
2390 		tsleep(bp, PRIBIO, "biowait", 0);
2391 #else
2392 		if (bp->b_flags & B_READ)
2393 			tsleep(bp, PRIBIO, "biord", 0);
2394 		else
2395 			tsleep(bp, PRIBIO, "biowr", 0);
2396 #endif
2397 	splx(s);
2398 	if (bp->b_flags & B_EINTR) {
2399 		bp->b_flags &= ~B_EINTR;
2400 		return (EINTR);
2401 	}
2402 	if (bp->b_flags & B_ERROR) {
2403 		return (bp->b_error ? bp->b_error : EIO);
2404 	} else {
2405 		return (0);
2406 	}
2407 }
2408 
2409 /*
2410  *	biodone:
2411  *
2412  *	Finish I/O on a buffer, optionally calling a completion function.
2413  *	This is usually called from an interrupt so process blocking is
2414  *	not allowed.
2415  *
2416  *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
2417  *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
2418  *	assuming B_INVAL is clear.
2419  *
2420  *	For the VMIO case, we set B_CACHE if the op was a read and no
2421  *	read error occurred, or if the op was a write.  B_CACHE is never
2422  *	set if the buffer is invalid or otherwise uncacheable.
2423  *
2424  *	biodone does not mess with B_INVAL, allowing the I/O routine or the
2425  *	initiator to leave B_INVAL set to brelse the buffer out of existence
2426  *	in the biodone routine.
2427  */
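/*
 *	Sketch of an asynchronous initiator using the optional completion
 *	callback (the handler name here is hypothetical):
 *
 *		bp->b_flags |= B_CALL | B_ASYNC;
 *		bp->b_iodone = mydriver_iodone;
 *		VOP_STRATEGY(vp, bp);
 *
 *	When the transfer finishes, the driver's interrupt handler calls
 *	biodone(bp), which invokes mydriver_iodone(bp) rather than
 *	releasing the buffer here.
 */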
2428 void
2429 biodone(register struct buf * bp)
2430 {
2431 	int s;
2432 
2433 	s = splbio();
2434 
2435 	KASSERT((bp->b_flags & B_BUSY), ("biodone: bp %p not busy", bp));
2436 	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
2437 
2438 	bp->b_flags |= B_DONE;
2439 
2440 	if (bp->b_flags & B_FREEBUF) {
2441 		brelse(bp);
2442 		splx(s);
2443 		return;
2444 	}
2445 
2446 	if ((bp->b_flags & B_READ) == 0) {
2447 		vwakeup(bp);
2448 	}
2449 
2450 	/* call optional completion function if requested */
2451 	if (bp->b_flags & B_CALL) {
2452 		bp->b_flags &= ~B_CALL;
2453 		(*bp->b_iodone) (bp);
2454 		splx(s);
2455 		return;
2456 	}
2457 	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
2458 		(*bioops.io_complete)(bp);
2459 
2460 	if (bp->b_flags & B_VMIO) {
2461 		int i, resid;
2462 		vm_ooffset_t foff;
2463 		vm_page_t m;
2464 		vm_object_t obj;
2465 		int iosize;
2466 		struct vnode *vp = bp->b_vp;
2467 
2468 		obj = vp->v_object;
2469 
2470 #if defined(VFS_BIO_DEBUG)
2471 		if (vp->v_usecount == 0) {
2472 			panic("biodone: zero vnode ref count");
2473 		}
2474 
2475 		if (vp->v_object == NULL) {
2476 			panic("biodone: missing VM object");
2477 		}
2478 
2479 		if ((vp->v_flag & VOBJBUF) == 0) {
2480 			panic("biodone: vnode is not setup for merged cache");
2481 		}
2482 #endif
2483 
2484 		foff = bp->b_offset;
2485 		KASSERT(bp->b_offset != NOOFFSET,
2486 		    ("biodone: no buffer offset"));
2487 
2488 #if !defined(MAX_PERF)
2489 		if (!obj) {
2490 			panic("biodone: no object");
2491 		}
2492 #endif
2493 #if defined(VFS_BIO_DEBUG)
2494 		if (obj->paging_in_progress < bp->b_npages) {
2495 			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
2496 			    obj->paging_in_progress, bp->b_npages);
2497 		}
2498 #endif
2499 
2500 		/*
2501 		 * Set B_CACHE if the op was a normal read and no error
2502 		 * occurred.  B_CACHE is set for writes in the b*write()
2503 		 * routines.
2504 		 */
2505 		iosize = bp->b_bcount;
2506 		if ((bp->b_flags & (B_READ|B_FREEBUF|B_INVAL|B_NOCACHE|B_ERROR)) == B_READ) {
2507 			bp->b_flags |= B_CACHE;
2508 		}
2509 
2510 		for (i = 0; i < bp->b_npages; i++) {
2511 			int bogusflag = 0;
2512 			m = bp->b_pages[i];
2513 			if (m == bogus_page) {
2514 				bogusflag = 1;
2515 				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
2516 				if (!m) {
2517 #if defined(VFS_BIO_DEBUG)
2518 					printf("biodone: page disappeared\n");
2519 #endif
2520 					vm_object_pip_subtract(obj, 1);
2521 					bp->b_flags &= ~B_CACHE;
2522 					continue;
2523 				}
2524 				bp->b_pages[i] = m;
2525 				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2526 			}
2527 #if defined(VFS_BIO_DEBUG)
2528 			if (OFF_TO_IDX(foff) != m->pindex) {
2529 				printf(
2530 "biodone: foff(%lu)/m->pindex(%d) mismatch\n",
2531 				    (unsigned long)foff, m->pindex);
2532 			}
2533 #endif
2534 			resid = IDX_TO_OFF(m->pindex + 1) - foff;
2535 			if (resid > iosize)
2536 				resid = iosize;
2537 
2538 			/*
2539 			 * In the write case, the valid and clean bits are
2540 			 * already changed correctly ( see bdwrite() ), so we
2541 			 * only need to do this here in the read case.
2542 			 */
2543 			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
2544 				vfs_page_set_valid(bp, foff, i, m);
2545 			}
2546 			vm_page_flag_clear(m, PG_ZERO);
2547 
2548 			/*
2549 			 * when debugging new filesystems or buffer I/O methods, this
2550 			 * is the most common error that pops up.  if you see this, you
2551 			 * have not set the page busy flag correctly!!!
2552 			 */
2553 			if (m->busy == 0) {
2554 #if !defined(MAX_PERF)
2555 				printf("biodone: page busy < 0, "
2556 				    "pindex: %d, foff: 0x(%x,%x), "
2557 				    "resid: %d, index: %d\n",
2558 				    (int) m->pindex, (int)(foff >> 32),
2559 						(int) foff & 0xffffffff, resid, i);
2560 #endif
2561 				if (vp->v_type != VBLK)
2562 #if !defined(MAX_PERF)
2563 					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
2564 					    bp->b_vp->v_mount->mnt_stat.f_iosize,
2565 					    (int) bp->b_lblkno,
2566 					    bp->b_flags, bp->b_npages);
2567 				else
2568 					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
2569 					    (int) bp->b_lblkno,
2570 					    bp->b_flags, bp->b_npages);
2571 				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
2572 				    m->valid, m->dirty, m->wire_count);
2573 #endif
2574 				panic("biodone: page busy < 0\n");
2575 			}
2576 			vm_page_io_finish(m);
2577 			vm_object_pip_subtract(obj, 1);
2578 			foff += resid;
2579 			iosize -= resid;
2580 		}
2581 		if (obj)
2582 			vm_object_pip_wakeupn(obj, 0);
2583 	}
2584 	/*
2585 	 * For asynchronous completions, release the buffer now. The brelse
2586 	 * checks for B_WANTED and will do the wakeup there if necessary - so
2587 	 * no need to do a wakeup here in the async case.
2588 	 */
2589 
2590 	if (bp->b_flags & B_ASYNC) {
2591 		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
2592 			brelse(bp);
2593 		else
2594 			bqrelse(bp);
2595 	} else {
2596 		bp->b_flags &= ~B_WANTED;
2597 		wakeup(bp);
2598 	}
2599 	splx(s);
2600 }
2601 
2602 #if 0	/* not with kirks code */
2603 static int vfs_update_interval = 30;
2604 
2605 static void
2606 vfs_update()
2607 {
2608 	while (1) {
2609 		tsleep(&vfs_update_wakeup, PUSER, "update",
2610 		    hz * vfs_update_interval);
2611 		vfs_update_wakeup = 0;
2612 		sync(curproc, NULL);
2613 	}
2614 }
2615 
2616 static int
2617 sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
2618 {
2619 	int error = sysctl_handle_int(oidp,
2620 		oidp->oid_arg1, oidp->oid_arg2, req);
2621 	if (!error)
2622 		wakeup(&vfs_update_wakeup);
2623 	return error;
2624 }
2625 
2626 SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
2627 	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");
2628 
2629 #endif
2630 
2631 
2632 /*
2633  * This routine is called in lieu of iodone in the case of
2634  * incomplete I/O.  This keeps the busy status for pages
2635  * consistant.
2636  */
2637 void
2638 vfs_unbusy_pages(struct buf * bp)
2639 {
2640 	int i;
2641 
2642 	if (bp->b_flags & B_VMIO) {
2643 		struct vnode *vp = bp->b_vp;
2644 		vm_object_t obj = vp->v_object;
2645 
2646 		for (i = 0; i < bp->b_npages; i++) {
2647 			vm_page_t m = bp->b_pages[i];
2648 
2649 			if (m == bogus_page) {
2650 				m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
2651 #if !defined(MAX_PERF)
2652 				if (!m) {
2653 					panic("vfs_unbusy_pages: page missing\n");
2654 				}
2655 #endif
2656 				bp->b_pages[i] = m;
2657 				pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2658 			}
2659 			vm_object_pip_subtract(obj, 1);
2660 			vm_page_flag_clear(m, PG_ZERO);
2661 			vm_page_io_finish(m);
2662 		}
2663 		vm_object_pip_wakeupn(obj, 0);
2664 	}
2665 }
2666 
2667 /*
2668  * vfs_page_set_valid:
2669  *
2670  *	Set the valid bits in a page based on the supplied offset.   The
2671  *	range is restricted to the buffer's size.
2672  *
2673  *	This routine is typically called after a read completes.
2674  */
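/*
 *	For example (4K pages assumed): with off == b_offset == 1024 and
 *	b_bcount == 2048, eoff is first rounded up to 4096 and then clipped
 *	to 1024 + 2048 == 3072, so the call below becomes
 *	vm_page_set_validclean(m, 1024, 2048) -- only the bytes actually
 *	covered by the buffer are marked valid and clean.
 */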
2675 static void
2676 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
2677 {
2678 	vm_ooffset_t soff, eoff;
2679 
2680 	/*
2681 	 * Start and end offsets in buffer.  eoff - soff may not cross a
2682 	 * page boundry or cross the end of the buffer.  The end of the
2683 	 * buffer, in this case, is our file EOF, not the allocation size
2684 	 * of the buffer.
2685 	 */
2686 	soff = off;
2687 	eoff = (off + PAGE_SIZE) & ~PAGE_MASK;
2688 	if (eoff > bp->b_offset + bp->b_bcount)
2689 		eoff = bp->b_offset + bp->b_bcount;
2690 
2691 	/*
2692 	 * Set valid range.  This is typically the entire buffer and thus the
2693 	 * entire page.
2694 	 */
2695 	if (eoff > soff) {
2696 		vm_page_set_validclean(
2697 		    m,
2698 		   (vm_offset_t) (soff & PAGE_MASK),
2699 		   (vm_offset_t) (eoff - soff)
2700 		);
2701 	}
2702 }
2703 
2704 /*
2705  * This routine is called before a device strategy routine.
2706  * It is used to tell the VM system that paging I/O is in
2707  * progress, and treat the pages associated with the buffer
2708  * almost as being PG_BUSY.  Also the object paging_in_progress
2709  * flag is handled to make sure that the object doesn't become
2710  * inconsistent.
2711  *
2712  * Since I/O has not been initiated yet, certain buffer flags
2713  * such as B_ERROR or B_INVAL may be in an inconsistent state
2714  * and should be ignored.
2715  */
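/*
 * Sketch of the usual calling sequence (cf. bread() and bwrite() earlier
 * in this file): clear_modify is 0 when readying a read and 1 when
 * readying a write, and the strategy call follows immediately:
 *
 *	vfs_busy_pages(bp, 0);	(before a read)
 *	VOP_STRATEGY(vp, bp);
 *
 *	vfs_busy_pages(bp, 1);	(before a write)
 *	VOP_STRATEGY(vp, bp);
 */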
2716 void
2717 vfs_busy_pages(struct buf * bp, int clear_modify)
2718 {
2719 	int i, bogus;
2720 
2721 	if (bp->b_flags & B_VMIO) {
2722 		struct vnode *vp = bp->b_vp;
2723 		vm_object_t obj = vp->v_object;
2724 		vm_ooffset_t foff;
2725 
2726 		foff = bp->b_offset;
2727 		KASSERT(bp->b_offset != NOOFFSET,
2728 		    ("vfs_busy_pages: no buffer offset"));
2729 		vfs_setdirty(bp);
2730 
2731 retry:
2732 		for (i = 0; i < bp->b_npages; i++) {
2733 			vm_page_t m = bp->b_pages[i];
2734 			if (vm_page_sleep_busy(m, FALSE, "vbpage"))
2735 				goto retry;
2736 		}
2737 
2738 		bogus = 0;
2739 		for (i = 0; i < bp->b_npages; i++) {
2740 			vm_page_t m = bp->b_pages[i];
2741 
2742 			vm_page_flag_clear(m, PG_ZERO);
2743 			if ((bp->b_flags & B_CLUSTER) == 0) {
2744 				vm_object_pip_add(obj, 1);
2745 				vm_page_io_start(m);
2746 			}
2747 
2748 			/*
2749 			 * When readying a buffer for a read ( i.e
2750 			 * clear_modify == 0 ), it is important to do
2751 			 * bogus_page replacement for valid pages in
2752 			 * partially instantiated buffers.  Partially
2753 			 * instantiated buffers can, in turn, occur when
2754 			 * reconstituting a buffer from its VM backing store
2755 			 * base.  We only have to do this if B_CACHE is
2756 			 * clear ( which causes the I/O to occur in the
2757 			 * first place ).  The replacement prevents the read
2758 			 * I/O from overwriting potentially dirty VM-backed
2759 			 * pages.  XXX bogus page replacement is, uh, bogus.
2760 			 * It may not work properly with small-block devices.
2761 			 * We need to find a better way.
2762 			 */
2763 
2764 			vm_page_protect(m, VM_PROT_NONE);
2765 			if (clear_modify)
2766 				vfs_page_set_valid(bp, foff, i, m);
2767 			else if (m->valid == VM_PAGE_BITS_ALL &&
2768 				(bp->b_flags & B_CACHE) == 0) {
2769 				bp->b_pages[i] = bogus_page;
2770 				bogus++;
2771 			}
2772 			foff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2773 		}
2774 		if (bogus)
2775 			pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
2776 	}
2777 }
2778 
2779 /*
2780  * Tell the VM system that the pages associated with this buffer
2781  * are clean.  This is used for delayed writes where the data is
2782  * going to go to disk eventually without additional VM intervention.
2783  *
2784  * Note that while we only really need to clean through to b_bcount, we
2785  * just go ahead and clean through to b_bufsize.
2786  */
2787 static void
2788 vfs_clean_pages(struct buf * bp)
2789 {
2790 	int i;
2791 
2792 	if (bp->b_flags & B_VMIO) {
2793 		vm_ooffset_t foff;
2794 
2795 		foff = bp->b_offset;
2796 		KASSERT(bp->b_offset != NOOFFSET,
2797 		    ("vfs_clean_pages: no buffer offset"));
2798 		for (i = 0; i < bp->b_npages; i++) {
2799 			vm_page_t m = bp->b_pages[i];
2800 			vm_ooffset_t noff = (foff + PAGE_SIZE) & ~PAGE_MASK;
2801 			vm_ooffset_t eoff = noff;
2802 
2803 			if (eoff > bp->b_offset + bp->b_bufsize)
2804 				eoff = bp->b_offset + bp->b_bufsize;
2805 			vfs_page_set_valid(bp, foff, i, m);
2806 			/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
2807 			foff = noff;
2808 		}
2809 	}
2810 }
2811 
2812 /*
2813  *	vfs_bio_set_validclean:
2814  *
2815  *	Set the range within the buffer to valid and clean.  The range is
2816  *	relative to the beginning of the buffer, b_offset.  Note that b_offset
2817  *	itself may be offset from the beginning of the first page.
2818  */
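/*
 *	For example (4K pages assumed): with (b_offset & PAGE_MASK) == 1024,
 *	base == 0 and size == 6144, the loop below performs
 *
 *		vm_page_set_validclean(b_pages[0], 1024, 3072);
 *		vm_page_set_validclean(b_pages[1],    0, 3072);
 *
 *	i.e. the byte range is walked page by page after being rebased
 *	against the start of the first page.
 */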
2819 
2820 void
2821 vfs_bio_set_validclean(struct buf *bp, int base, int size)
2822 {
2823 	if (bp->b_flags & B_VMIO) {
2824 		int i;
2825 		int n;
2826 
2827 		/*
2828 		 * Fixup base to be relative to beginning of first page.
2829 		 * Set initial n to be the maximum number of bytes in the
2830 		 * first page that can be validated.
2831 		 */
2832 
2833 		base += (bp->b_offset & PAGE_MASK);
2834 		n = PAGE_SIZE - (base & PAGE_MASK);
2835 
2836 		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
2837 			vm_page_t m = bp->b_pages[i];
2838 
2839 			if (n > size)
2840 				n = size;
2841 
2842 			vm_page_set_validclean(m, base & PAGE_MASK, n);
2843 			base += n;
2844 			size -= n;
2845 			n = PAGE_SIZE;
2846 		}
2847 	}
2848 }
2849 
2850 /*
2851  *	vfs_bio_clrbuf:
2852  *
2853  *	clear a buffer.  This routine essentially fakes an I/O, so we need
2854  *	to clear B_ERROR and B_INVAL.
2855  *
2856  *	Note that while we only theoretically need to clear through b_bcount,
2857  *	we go ahead and clear through b_bufsize.
2858  */
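/*
 *	The valid-bit masks below are built in DEV_BSIZE chunks.  For the
 *	small single-page case, for instance, a 2048 byte buffer with a
 *	512 byte DEV_BSIZE yields mask = (1 << 4) - 1 = 0x0f, i.e. only the
 *	first four DEV_BSIZE pieces of the page are considered (and marked)
 *	valid.
 */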
2859 
2860 void
2861 vfs_bio_clrbuf(struct buf *bp) {
2862 	int i, mask = 0;
2863 	caddr_t sa, ea;
2864 	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
2865 		bp->b_flags &= ~(B_INVAL|B_ERROR);
2866 		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
2867 		    (bp->b_offset & PAGE_MASK) == 0) {
2868 			mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
2869 			if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
2870 			    ((bp->b_pages[0]->valid & mask) != mask)) {
2871 				bzero(bp->b_data, bp->b_bufsize);
2872 			}
2873 			bp->b_pages[0]->valid |= mask;
2874 			bp->b_resid = 0;
2875 			return;
2876 		}
2877 		ea = sa = bp->b_data;
2878 		for(i=0;i<bp->b_npages;i++,sa=ea) {
2879 			int j = ((u_long)sa & PAGE_MASK) / DEV_BSIZE;
2880 			ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
2881 			ea = (caddr_t)ulmin((u_long)ea,
2882 				(u_long)bp->b_data + bp->b_bufsize);
2883 			mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
2884 			if ((bp->b_pages[i]->valid & mask) == mask)
2885 				continue;
2886 			if ((bp->b_pages[i]->valid & mask) == 0) {
2887 				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
2888 					bzero(sa, ea - sa);
2889 				}
2890 			} else {
2891 				for (; sa < ea; sa += DEV_BSIZE, j++) {
2892 					if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
2893 						(bp->b_pages[i]->valid & (1<<j)) == 0)
2894 						bzero(sa, DEV_BSIZE);
2895 				}
2896 			}
2897 			bp->b_pages[i]->valid |= mask;
2898 			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
2899 		}
2900 		bp->b_resid = 0;
2901 	} else {
2902 		clrbuf(bp);
2903 	}
2904 }
2905 
2906 /*
2907  * vm_hold_load_pages and vm_hold_free_pages get pages into
2908  * a buffer's address space.  The pages are anonymous and are
2909  * not associated with a file object.
2910  */
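/*
 * For instance, the non-VMIO grow path in allocbuf() above wires in the
 * newly added range with:
 *
 *	vm_hold_load_pages(bp, (vm_offset_t)bp->b_data + bp->b_bufsize,
 *	    (vm_offset_t)bp->b_data + newbsize);
 *
 * and the corresponding shrink path releases the tail of the range with
 * vm_hold_free_pages().
 */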
2911 void
2912 vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
2913 {
2914 	vm_offset_t pg;
2915 	vm_page_t p;
2916 	int index;
2917 
2918 	to = round_page(to);
2919 	from = round_page(from);
2920 	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
2921 
2922 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
2923 
2924 tryagain:
2925 
2926 		p = vm_page_alloc(kernel_object,
2927 			((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
2928 		    VM_ALLOC_NORMAL);
2929 		if (!p) {
2930 			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
2931 			VM_WAIT;
2932 			goto tryagain;
2933 		}
2934 		vm_page_wire(p);
2935 		p->valid = VM_PAGE_BITS_ALL;
2936 		vm_page_flag_clear(p, PG_ZERO);
2937 		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
2938 		bp->b_pages[index] = p;
2939 		vm_page_wakeup(p);
2940 	}
2941 	bp->b_npages = index;
2942 }
2943 
2944 void
2945 vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
2946 {
2947 	vm_offset_t pg;
2948 	vm_page_t p;
2949 	int index, newnpages;
2950 
2951 	from = round_page(from);
2952 	to = round_page(to);
2953 	newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
2954 
2955 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
2956 		p = bp->b_pages[index];
2957 		if (p && (index < bp->b_npages)) {
2958 #if !defined(MAX_PERF)
2959 			if (p->busy) {
2960 				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
2961 					bp->b_blkno, bp->b_lblkno);
2962 			}
2963 #endif
2964 			bp->b_pages[index] = NULL;
2965 			pmap_kremove(pg);
2966 			vm_page_busy(p);
2967 			vm_page_unwire(p, 0);
2968 			vm_page_free(p);
2969 		}
2970 	}
2971 	bp->b_npages = newnpages;
2972 }
2973 
2974 
2975 #include "opt_ddb.h"
2976 #ifdef DDB
2977 #include <ddb/ddb.h>
2978 
2979 DB_SHOW_COMMAND(buffer, db_show_buffer)
2980 {
2981 	/* get args */
2982 	struct buf *bp = (struct buf *)addr;
2983 
2984 	if (!have_addr) {
2985 		db_printf("usage: show buffer <addr>\n");
2986 		return;
2987 	}
2988 
2989 	db_printf("b_flags = 0x%b\n", (u_int)bp->b_flags, PRINT_BUF_FLAGS);
2990 	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
2991 		  "b_resid = %ld\nb_dev = (%d,%d), b_data = %p, "
2992 		  "b_blkno = %d, b_pblkno = %d\n",
2993 		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
2994 		  major(bp->b_dev), minor(bp->b_dev),
2995 		  bp->b_data, bp->b_blkno, bp->b_pblkno);
2996 	if (bp->b_npages) {
2997 		int i;
2998 		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
2999 		for (i = 0; i < bp->b_npages; i++) {
3000 			vm_page_t m;
3001 			m = bp->b_pages[i];
3002 			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
3003 			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
3004 			if ((i + 1) < bp->b_npages)
3005 				db_printf(",");
3006 		}
3007 		db_printf("\n");
3008 	}
3009 }
3010 #endif /* DDB */
3011