/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.101 1996/09/18 15:57:41 dyson Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * was provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <miscfs/specfs/specdev.h>

static void vfs_update __P((void));
static struct	proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);

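/*
 * Set by getnewbuf() when a process is about to sleep waiting for a
 * free buffer; cleared by brelse()/bqrelse() when a buffer is returned
 * to a queue and the sleeper is awakened.
 */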
int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

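/*
 * Space accounting: current and maximum kernel virtual space used by
 * all buffers, by VMIO-backed buffers, and by malloc-backed buffers.
 */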
static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;

static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
static struct bqueues bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

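/*
 * Cap on a buffer's b_usecount: getblk() stops incrementing the reuse
 * counter at this value, bounding the LRU aging credit a buffer earns.
 */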
#define BUF_MAXUSE 8
/*
#define NO_B_MALLOC
*/

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated assuming all filesystem blocks
 * are 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly for
 * average (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	/*
	 * Handle ordered writes here.
	 * If the write was originally flagged as ordered,
	 * then we check to see if it was converted to async.
	 * If it was converted to async, and is done now, then
	 * we release the buffer.  Otherwise we clear the
	 * ordered flag because it is not needed anymore.
	 *
	 * Note that biodone has been modified so that it does
	 * not release ordered buffers.  This allows us to have
	 * a chance to determine whether or not the driver
	 * has set the async flag in the strategy routine.  Otherwise
	 * if biodone was not modified, then the buffer may have been
	 * reused before we have had a chance to check the flag.
	 */

	if ((oldflags & B_ORDERED) == B_ORDERED) {
		int s;
		s = splbio();
		if (bp->b_flags & B_ASYNC)  {
			if ((bp->b_flags & B_DONE)) {
				if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
					brelse(bp);
				else
					bqrelse(bp);
			}
			splx(s);
			return (0);
		} else {
			bp->b_flags &= ~B_ORDERED;
		}
		splx(s);
	}

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

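/*
 * Default VOP_BWRITE: hand the buffer to bwrite().
 */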
int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write. (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also that if the pageout
	 * daemon is requesting a sync -- there might not be enough memory
	 * to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not strictly necessary to keep a VMIO
	 * buffer constituted, so the B_INVAL flag is used to *invalidate* the
	 * buffer, but the VM object is kept around.  The B_NOCACHE flag is
	 * used to invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
					if (!m) {
						panic("brelse: page missing\n");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
						bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
		/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
		/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
		/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer, without invalidating its contents or tearing down
 * its VM backing; a cheaper version of brelse for buffers whose
 * contents remain valid.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer=0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

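/*
 * Release the VM pages backing a VMIO buffer: unwire each page and,
 * depending on its state and on memory pressure, free, cache, or
 * deactivate it; then detach the buffer from its KVA and vnode.
 */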
static void
vfs_vmio_release(struct buf * bp)
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		if ((bp->b_flags & B_ASYNC) == 0) {
			while ((m->flags & PG_BUSY) || (m->busy != 0)) {
				m->flags |= PG_WANTED;
				tsleep(m, PVM, "vmiorl", 0);
			}
		}

		vm_page_unwire(m);

		if (m->wire_count == 0) {

			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			if (bp->b_flags & B_ASYNC) {
				if (m->hold_count == 0) {
					if ((m->flags & PG_BUSY) == 0 &&
						(m->busy == 0) &&
						(m->valid == 0)) {
						if (m->dirty == 0)
							vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_protect(m, VM_PROT_NONE);
							vm_page_free(m);
						} else {
							pagedaemon_wakeup();
						}
					/*
					 * This is likely at interrupt time,
					 * and we cannot block here.
					 */
					} else if (cnt.v_free_count < cnt.v_free_min) {
						pagedaemon_wakeup();
					}
				}
				continue;
			}

			if (m->valid) {
				if (m->dirty == 0)
					vm_page_test_dirty(m);
				/*
				 * this keeps pressure off of the process memory
				 */
				if ((vm_swap_size == 0) ||
					(cnt.v_free_count < cnt.v_free_min)) {
					if ((m->dirty == 0) &&
						(m->hold_count == 0))
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				}
			} else if (m->hold_count == 0) {
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident, by searching
 * the buffer hash chain; the caller is normally running at splbio.
 */
__inline struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}

/*
 * Find a buffer header which is available for use.
 */
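/*
 * slpflag and slptimeo are passed to tsleep() when no buffer is
 * immediately available; a zero return tells the caller to retry.
 */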
static struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int nbyteswritten = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O.
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep(&needsbuffer,
			(PRIBIO + 1) | slpflag, "newbuf", slptimeo);
		return (0);
	}

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
			(vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		nbyteswritten += vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO)
		vfs_vmio_release(bp);

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 4;
	if (bufspace >= maxbufspace + nbyteswritten) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
		((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
				(PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen but do when filesystems don't handle the size changes
		 * correctly.)  We are conservative on metadata and don't just
		 * extend the buffer but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if (bp->b_flags & B_VMIO) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
#ifdef	PC98
		/*
		 * 1024-byte/sector support
		 */
#define B_XXX2 0x8000000
		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
#endif
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, 0, 0)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}

/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer
			 * grows.
			 */
			if ( (bufmallocspace < maxbufmallocspace) &&
				(bp->b_bufsize == 0) &&
				(mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first allocation,
			 * then we revert to the page-allocation scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
					s = splvm();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
		doretry:
				curbpnpages = bp->b_npages;
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
							!vm_page_is_valid(m,
							(vm_offset_t) ((toff + off) & PAGE_MASK),
							bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {
						s = splvm();
						if (m->flags & PG_BUSY) {
							m->flags |= PG_WANTED;
							tsleep(m, PVM, "pgtblk", 0);
						}
						splx(s);
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
							((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
								(cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
							!vm_page_is_valid(m,
							(vm_offset_t) ((toff + off) & PAGE_MASK),
							bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
					bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += bp->b_bufsize;
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vm_page_set_validclean(m,
					(vm_offset_t) (foff & PAGE_MASK), resid);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
						(int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & B_ORDERED) == 0) {
			if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
				brelse(bp);
			else
				bqrelse(bp);
		}
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	splx(s);
}

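/*
 * Return the number of buffers currently on the locked queue.
 */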
int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

int vfs_update_interval = 30;

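/*
 * Body of the update daemon: sync the filesystems every
 * vfs_update_interval seconds, or sooner when an internal sync is
 * requested via vfs_update_wakeup.
 */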
static void
vfs_update()
{
	(void) spl0();		/* XXX redundant?  wrong place? */
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

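/*
 * Sysctl handler for the update interval: store the new value and
 * wake the update daemon so the change takes effect immediately.
 */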
static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
		oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");

/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = bp->b_vp->v_object;
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify) {
				vm_page_set_validclean(m,
					(vm_offset_t) (foff & PAGE_MASK), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_validclean(m,
					((vm_offset_t) foff & PAGE_MASK), resid);
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

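/*
 * Zero out the portions of a buffer that are not covered by valid
 * page contents; for non-VMIO buffers the whole buffer is cleared.
 */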
void
vfs_bio_clrbuf(struct buf *bp) {
	int i;
	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;
			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i / DEV_BSIZE));
			if (bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;
				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * and out of a buffer's address space.  The pages are anonymous
 * and are not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = to >> PAGE_SHIFT;
}

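/*
 * Release the anonymous pages backing the range [from, to) of the
 * buffer's address space and unmap them from the kernel.
 */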
void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	from = round_page(from);
	to = round_page(to);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
					bp->b_blkno, bp->b_lblkno);
			}
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = from >> PAGE_SHIFT;
}