xref: /freebsd/sys/kern/vfs_bio.c (revision 05c7a37afb48ddd5ee1bd921a5d46fe59cc70b15)
1 /*
2  * Copyright (c) 1994 John S. Dyson
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice immediately at the beginning of the file, without modification,
10  *    this list of conditions, and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Absolutely no warranty of function or purpose is made by the author
15  *    John S. Dyson.
16  * 4. This work was done expressly for inclusion into FreeBSD.  Other use
17  *    is allowed if this notation is included.
18  * 5. Modifications may be freely made to this file if the above conditions
19  *    are met.
20  *
21  * $Id: vfs_bio.c,v 1.87 1996/03/03 01:04:28 dyson Exp $
22  */
23 
24 /*
25  * This file contains a new buffer I/O scheme implementing a coherent
26  * VM object and buffer cache scheme.  Pains have been taken to make
27  * sure that the performance degradation associated with such schemes
28  * is not realized.
29  *
30  * Author:  John S. Dyson
31  * Significant help during the development and debugging phases
32  * was provided by David Greenman, also of the FreeBSD core team.
33  */
34 
35 #include "opt_bounce.h"
36 
37 #define VMIO
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/sysproto.h>
41 #include <sys/kernel.h>
42 #include <sys/sysctl.h>
43 #include <sys/proc.h>
44 #include <sys/vnode.h>
45 #include <sys/vmmeter.h>
46 #include <vm/vm.h>
47 #include <vm/vm_param.h>
48 #include <vm/vm_prot.h>
49 #include <vm/vm_kern.h>
50 #include <vm/vm_pageout.h>
51 #include <vm/vm_page.h>
52 #include <vm/vm_object.h>
53 #include <vm/vm_extern.h>
54 #include <sys/buf.h>
55 #include <sys/mount.h>
56 #include <sys/malloc.h>
57 #include <sys/resourcevar.h>
59 
60 #include <miscfs/specfs/specdev.h>
61 
62 static void vfs_update __P((void));
63 static struct	proc *updateproc;
64 static struct kproc_desc up_kp = {
65 	"update",
66 	vfs_update,
67 	&updateproc
68 };
69 SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
70 
71 struct buf *buf;		/* buffer header pool */
72 struct swqueue bswlist;
73 
74 int count_lock_queue __P((void));
75 static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
76 		vm_offset_t to);
77 static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
78 		vm_offset_t to);
79 static void vfs_clean_pages(struct buf * bp);
80 static void vfs_setdirty(struct buf *bp);
81 static void vfs_vmio_release(struct buf *bp);
82 
83 int needsbuffer;
84 
85 /*
86  * Internal update daemon, process 3
87  *	The variable vfs_update_wakeup allows for internal syncs.
88  */
89 int vfs_update_wakeup;
90 
91 
92 /*
93  * buffers base kva
94  */
95 caddr_t buffers_kva;
96 
97 /*
98  * bogus page -- for I/O to/from partially complete buffers.
99  * This is a temporary solution to the problem, but it is not
100  * really that bad.  It would be better to split the buffer
101  * for input in the case of buffers partially already in memory,
102  * but the code is intricate enough already.
103  */
104 vm_page_t bogus_page;
105 static vm_offset_t bogus_offset;
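/*
 * Illustrative sketch (not compiled in) of how bogus_page is used,
 * drawn from vfs_busy_pages() and biodone() below: before a read into
 * a buffer whose pages are partially valid, any already-valid page is
 * replaced by bogus_page so the device transfer cannot clobber it, and
 * the real page is looked up again at completion.
 */
#if 0
	/* at I/O start (see vfs_busy_pages()): */
	if (m->valid && (bp->b_flags & B_CACHE) == 0) {
		bp->b_pages[i] = bogus_page;
		pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
	}
	/* at I/O completion (see biodone()): */
	if (bp->b_pages[i] == bogus_page) {
		bp->b_pages[i] = vm_page_lookup(obj, OFF_TO_IDX(foff));
		pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
	}
#endif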
106 
107 static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
108 	bufmallocspace, maxbufmallocspace;
109 
110 static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
111 static struct bqueues bufqueues[BUFFER_QUEUES];
112 
113 extern int vm_swap_size;
114 
115 #define BUF_MAXUSE 8
116 
117 /*
118  * Initialize buffer headers and related structures.
119  */
120 void
121 bufinit()
122 {
123 	struct buf *bp;
124 	int i;
125 
126 	TAILQ_INIT(&bswlist);
127 	LIST_INIT(&invalhash);
128 
129 	/* first, make a null hash table */
130 	for (i = 0; i < BUFHSZ; i++)
131 		LIST_INIT(&bufhashtbl[i]);
132 
133 	/* next, make a null set of free lists */
134 	for (i = 0; i < BUFFER_QUEUES; i++)
135 		TAILQ_INIT(&bufqueues[i]);
136 
137 	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
138 	/* finally, initialize each buffer header and stick on empty q */
139 	for (i = 0; i < nbuf; i++) {
140 		bp = &buf[i];
141 		bzero(bp, sizeof *bp);
142 		bp->b_flags = B_INVAL;	/* we're just an empty header */
143 		bp->b_dev = NODEV;
144 		bp->b_rcred = NOCRED;
145 		bp->b_wcred = NOCRED;
146 		bp->b_qindex = QUEUE_EMPTY;
147 		bp->b_vnbufs.le_next = NOLIST;
148 		bp->b_data = buffers_kva + i * MAXBSIZE;
149 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
150 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
151 	}
152 /*
153  * maxbufspace is currently calculated to allow all filesystem blocks
154  * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
155  * cache is still the same as it would be for 8K filesystems.  This
156  * keeps the size of the buffer cache "in check" for big block filesystems.
157  */
158 	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;
159 /*
160  * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
161  */
162 	maxvmiobufspace = 2 * maxbufspace / 3;
163 /*
164  * Limit the amount of malloc memory since it is wired permanently into
165  * the kernel space.  Even though this is accounted for in the buffer
166  * allocation, we don't want the malloced region to grow uncontrolled.
167  * The malloc scheme improves memory utilization significantly on average
168  * (small) directories.
169  */
170 	maxbufmallocspace = maxbufspace / 20;
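/*
 * Worked example of the limits above (assuming nbuf = 1024 and
 * PAGE_SIZE = 4096; both are configuration dependent):
 *	maxbufspace       = 2 * (1024 + 8) * 4096 = 8454144 (about 8MB)
 *	maxvmiobufspace   = 2 * 8454144 / 3       = 5636096
 *	maxbufmallocspace = 8454144 / 20          = 422707
 */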
171 
172 	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
173 	bogus_page = vm_page_alloc(kernel_object,
174 			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
175 			VM_ALLOC_NORMAL);
176 
177 }
178 
179 /*
180  * remove the buffer from the appropriate free list
181  */
182 void
183 bremfree(struct buf * bp)
184 {
185 	int s = splbio();
186 
187 	if (bp->b_qindex != QUEUE_NONE) {
188 		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
189 		bp->b_qindex = QUEUE_NONE;
190 	} else {
191 		panic("bremfree: removing a buffer when not on a queue");
192 	}
193 	splx(s);
194 }
195 
196 /*
197  * Get a buffer with the specified data.  Look in the cache first.
198  */
199 int
200 bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
201     struct buf ** bpp)
202 {
203 	struct buf *bp;
204 
205 	bp = getblk(vp, blkno, size, 0, 0);
206 	*bpp = bp;
207 
208 	/* if not found in cache, do some I/O */
209 	if ((bp->b_flags & B_CACHE) == 0) {
210 		if (curproc != NULL)
211 			curproc->p_stats->p_ru.ru_inblock++;
212 		bp->b_flags |= B_READ;
213 		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
214 		if (bp->b_rcred == NOCRED) {
215 			if (cred != NOCRED)
216 				crhold(cred);
217 			bp->b_rcred = cred;
218 		}
219 		vfs_busy_pages(bp, 0);
220 		VOP_STRATEGY(bp);
221 		return (biowait(bp));
222 	}
223 	return (0);
224 }
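/*
 * Typical calling sequence for bread(), as a sketch (the vnode vp,
 * logical block lblkno and block size bsize are hypothetical):
 */
#if 0
	struct buf *bp;
	int error;

	error = bread(vp, lblkno, bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);		/* a buffer is returned even on error */
		return (error);
	}
	/* ... examine or modify bp->b_data ... */
	brelse(bp);			/* or bdwrite(bp)/bawrite(bp) if dirtied */
#endif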
225 
226 /*
227  * Operates like bread, but also starts asynchronous I/O on
228  * read-ahead blocks.
229  */
230 int
231 breadn(struct vnode * vp, daddr_t blkno, int size,
232     daddr_t * rablkno, int *rabsize,
233     int cnt, struct ucred * cred, struct buf ** bpp)
234 {
235 	struct buf *bp, *rabp;
236 	int i;
237 	int rv = 0, readwait = 0;
238 
239 	*bpp = bp = getblk(vp, blkno, size, 0, 0);
240 
241 	/* if not found in cache, do some I/O */
242 	if ((bp->b_flags & B_CACHE) == 0) {
243 		if (curproc != NULL)
244 			curproc->p_stats->p_ru.ru_inblock++;
245 		bp->b_flags |= B_READ;
246 		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
247 		if (bp->b_rcred == NOCRED) {
248 			if (cred != NOCRED)
249 				crhold(cred);
250 			bp->b_rcred = cred;
251 		}
252 		vfs_busy_pages(bp, 0);
253 		VOP_STRATEGY(bp);
254 		++readwait;
255 	}
256 	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
257 		if (inmem(vp, *rablkno))
258 			continue;
259 		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);
260 
261 		if ((rabp->b_flags & B_CACHE) == 0) {
262 			if (curproc != NULL)
263 				curproc->p_stats->p_ru.ru_inblock++;
264 			rabp->b_flags |= B_READ | B_ASYNC;
265 			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
266 			if (rabp->b_rcred == NOCRED) {
267 				if (cred != NOCRED)
268 					crhold(cred);
269 				rabp->b_rcred = cred;
270 			}
271 			vfs_busy_pages(rabp, 0);
272 			VOP_STRATEGY(rabp);
273 		} else {
274 			brelse(rabp);
275 		}
276 	}
277 
278 	if (readwait) {
279 		rv = biowait(bp);
280 	}
281 	return (rv);
282 }
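/*
 * Read-ahead usage sketch for breadn() (hypothetical caller): fetch
 * lblkno and start an asynchronous read of the following block.
 */
#if 0
	daddr_t rablkno = lblkno + 1;
	int rabsize = bsize;

	error = breadn(vp, lblkno, bsize, &rablkno, &rabsize, 1, NOCRED, &bp);
#endif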
283 
284 /*
285  * Write, release buffer on completion.  (Done by iodone
286  * if async.)
287  */
288 int
289 bwrite(struct buf * bp)
290 {
291 	int oldflags = bp->b_flags;
292 
293 	if (bp->b_flags & B_INVAL) {
294 		brelse(bp);
295 		return (0);
296 	}
297 	if (!(bp->b_flags & B_BUSY))
298 		panic("bwrite: buffer is not busy???");
299 
300 	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
301 	bp->b_flags |= B_WRITEINPROG;
302 
303 	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
304 		reassignbuf(bp, bp->b_vp);
305 	}
306 
307 	bp->b_vp->v_numoutput++;
308 	vfs_busy_pages(bp, 1);
309 	if (curproc != NULL)
310 		curproc->p_stats->p_ru.ru_oublock++;
311 	VOP_STRATEGY(bp);
312 
313 	if ((oldflags & B_ASYNC) == 0) {
314 		int rtval = biowait(bp);
315 
316 		if (oldflags & B_DELWRI) {
317 			reassignbuf(bp, bp->b_vp);
318 		}
319 		brelse(bp);
320 		return (rtval);
321 	}
322 	return (0);
323 }
324 
325 int
326 vn_bwrite(ap)
327 	struct vop_bwrite_args *ap;
328 {
329 	return (bwrite(ap->a_bp));
330 }
331 
332 /*
333  * Delayed write. (Buffer is marked dirty).
334  */
335 void
336 bdwrite(struct buf * bp)
337 {
338 
339 	if ((bp->b_flags & B_BUSY) == 0) {
340 		panic("bdwrite: buffer is not busy");
341 	}
342 	if (bp->b_flags & B_INVAL) {
343 		brelse(bp);
344 		return;
345 	}
346 	if (bp->b_flags & B_TAPE) {
347 		bawrite(bp);
348 		return;
349 	}
350 	bp->b_flags &= ~(B_READ|B_RELBUF);
351 	if ((bp->b_flags & B_DELWRI) == 0) {
352 		bp->b_flags |= B_DONE | B_DELWRI;
353 		reassignbuf(bp, bp->b_vp);
354 	}
355 
356 	/*
357 	 * This bmap keeps the system from needing to do the bmap later,
358 	 * perhaps when the system is attempting to do a sync.  Since it
359 	 * is likely that the indirect block -- or whatever other data
360 	 * structure the filesystem needs -- is still in memory now, it is a
361 	 * good thing to do this.  Note also that if the pageout daemon is
362 	 * requesting a sync -- there might not be enough memory to do
363 	 * the bmap then...  So, this is important to do.
364 	 */
365 	if( bp->b_lblkno == bp->b_blkno) {
366 		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
367 	}
368 
369 	/*
370 	 * Set the *dirty* buffer range based upon the VM system dirty pages.
371 	 */
372 	vfs_setdirty(bp);
373 
374 	/*
375 	 * We need to do this here to satisfy the vnode_pager and the
376 	 * pageout daemon, so that they think the pages have been
377 	 * "cleaned".  Note that since the pages are in a delayed-write
378 	 * buffer, the VFS layer "will" see that the pages get written
379 	 * out on the next sync, or perhaps the cluster will be completed.
380 	 */
381 	vfs_clean_pages(bp);
382 	bqrelse(bp);
383 	return;
384 }
385 
386 /*
387  * Asynchronous write.
388  * Start output on a buffer, but do not wait for it to complete.
389  * The buffer is released when the output completes.
390  */
391 void
392 bawrite(struct buf * bp)
393 {
394 	bp->b_flags |= B_ASYNC;
395 	(void) VOP_BWRITE(bp);
396 }
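/*
 * Summary of the three write paths above:
 *	bwrite()  - synchronous: the caller sleeps in biowait() until the
 *		    I/O completes and gets the error status back.
 *	bdwrite() - delayed: the buffer is marked B_DELWRI and requeued;
 *		    the actual write happens later (vfs_update, clustering,
 *		    or buffer reclamation).
 *	bawrite() - asynchronous: the write is started immediately and the
 *		    buffer is released by biodone() on completion.
 */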
397 
398 /*
399  * Release a buffer.
400  */
401 void
402 brelse(struct buf * bp)
403 {
404 	int s;
405 
406 	if (bp->b_flags & B_CLUSTER) {
407 		relpbuf(bp);
408 		return;
409 	}
410 	/* anyone need a "free" block? */
411 	s = splbio();
412 
413 	/* anyone need this block? */
414 	if (bp->b_flags & B_WANTED) {
415 		bp->b_flags &= ~(B_WANTED | B_AGE);
416 		wakeup(bp);
417 	}
418 
419 	if (bp->b_flags & B_LOCKED)
420 		bp->b_flags &= ~B_ERROR;
421 
422 	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
423 	    (bp->b_bufsize <= 0)) {
424 		bp->b_flags |= B_INVAL;
425 		bp->b_flags &= ~(B_DELWRI | B_CACHE);
426 		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
427 			if (bp->b_bufsize)
428 				allocbuf(bp, 0);
429 			brelvp(bp);
430 		}
431 	}
432 
433 	/*
434 	 * VMIO buffer rundown.  It is not strictly necessary to keep a VMIO buffer
435 	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
436 	 * but the VM object is kept around.  The B_NOCACHE flag is used to
437 	 * invalidate the pages in the VM object.
438 	 */
439 	if (bp->b_flags & B_VMIO) {
440 		vm_ooffset_t foff;
441 		vm_object_t obj;
442 		int i, resid;
443 		vm_page_t m;
444 		struct vnode *vp;
445 		int iototal = bp->b_bufsize;
446 
447 		vp = bp->b_vp;
448 		if (!vp)
449 			panic("brelse: missing vp");
450 
451 		if (bp->b_npages) {
452 			vm_pindex_t poff;
453 			obj = (vm_object_t) vp->v_object;
454 			if (vp->v_type == VBLK)
455 				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
456 			else
457 				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
458 			poff = OFF_TO_IDX(foff);
459 			for (i = 0; i < bp->b_npages; i++) {
460 				m = bp->b_pages[i];
461 				if (m == bogus_page) {
462 					m = vm_page_lookup(obj, poff + i);
463 					if (!m) {
464 						panic("brelse: page missing\n");
465 					}
466 					bp->b_pages[i] = m;
467 					pmap_qenter(trunc_page(bp->b_data),
468 						bp->b_pages, bp->b_npages);
469 				}
470 				resid = IDX_TO_OFF(m->pindex+1) - foff;
471 				if (resid > iototal)
472 					resid = iototal;
473 				if (resid > 0) {
474 					/*
475 					 * Don't invalidate the page if the local machine has already
476 					 * modified it.  This is the lesser of two evils, and should
477 					 * be fixed.
478 					 */
479 					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
480 						vm_page_test_dirty(m);
481 						if (m->dirty == 0) {
482 							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
483 							if (m->valid == 0)
484 								vm_page_protect(m, VM_PROT_NONE);
485 						}
486 					}
487 					if (resid >= PAGE_SIZE) {
488 						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
489 							bp->b_flags |= B_INVAL;
490 						}
491 					} else {
492 						if (!vm_page_is_valid(m,
493 							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
494 							bp->b_flags |= B_INVAL;
495 						}
496 					}
497 				}
498 				foff += resid;
499 				iototal -= resid;
500 			}
501 		}
502 		if (bp->b_flags & (B_INVAL | B_RELBUF))
503 			vfs_vmio_release(bp);
504 	}
505 	if (bp->b_qindex != QUEUE_NONE)
506 		panic("brelse: free buffer onto another queue???");
507 
508 	/* enqueue */
509 	/* buffers with no memory */
510 	if (bp->b_bufsize == 0) {
511 		bp->b_qindex = QUEUE_EMPTY;
512 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
513 		LIST_REMOVE(bp, b_hash);
514 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
515 		bp->b_dev = NODEV;
516 		if (needsbuffer) {
517 			wakeup(&needsbuffer);
518 			needsbuffer=0;
519 		}
520 		/* buffers with junk contents */
521 	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
522 		bp->b_qindex = QUEUE_AGE;
523 		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
524 		LIST_REMOVE(bp, b_hash);
525 		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
526 		bp->b_dev = NODEV;
527 		if (needsbuffer) {
528 			wakeup(&needsbuffer);
529 			needsbuffer=0;
530 		}
531 		/* buffers that are locked */
532 	} else if (bp->b_flags & B_LOCKED) {
533 		bp->b_qindex = QUEUE_LOCKED;
534 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
535 		/* buffers with stale but valid contents */
536 	} else if (bp->b_flags & B_AGE) {
537 		bp->b_qindex = QUEUE_AGE;
538 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
539 		if (needsbuffer) {
540 			wakeup(&needsbuffer);
541 			needsbuffer=0;
542 		}
543 		/* buffers with valid and quite potentially reusable contents */
544 	} else {
545 		bp->b_qindex = QUEUE_LRU;
546 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
547 		if (needsbuffer) {
548 			wakeup(&needsbuffer);
549 			needsbuffer=0;
550 		}
551 	}
552 
553 	/* unlock */
554 	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
555 	splx(s);
556 }
557 
558 /*
559  * Release a buffer, without the invalidation processing done by brelse().
560  */
561 void
562 bqrelse(struct buf * bp)
563 {
564 	int s;
565 
566 	s = splbio();
567 
568 
569 	/* anyone need this block? */
570 	if (bp->b_flags & B_WANTED) {
571 		bp->b_flags &= ~(B_WANTED | B_AGE);
572 		wakeup(bp);
573 	}
574 
575 	if (bp->b_qindex != QUEUE_NONE)
576 		panic("bqrelse: free buffer onto another queue???");
577 
578 	if (bp->b_flags & B_LOCKED) {
579 		bp->b_flags &= ~B_ERROR;
580 		bp->b_qindex = QUEUE_LOCKED;
581 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
582 		/* buffers with stale but valid contents */
583 	} else {
584 		bp->b_qindex = QUEUE_LRU;
585 		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
586 		if (needsbuffer) {
587 			wakeup(&needsbuffer);
588 			needsbuffer=0;
589 		}
590 	}
591 
592 	/* unlock */
593 	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
594 	splx(s);
595 }
596 
597 static void
598 vfs_vmio_release(bp)
599 	struct buf *bp;
600 {
601 	int i;
602 	vm_page_t m;
603 
604 	for (i = 0; i < bp->b_npages; i++) {
605 		m = bp->b_pages[i];
606 		bp->b_pages[i] = NULL;
607 		if (m->flags & PG_WANTED) {
608 			m->flags &= ~PG_WANTED;
609 			wakeup(m);
610 		}
611 		vm_page_unwire(m);
612 		if (m->wire_count == 0) {
613 			if (m->valid) {
614 				if(m->dirty == 0)
615 					vm_page_test_dirty(m);
616 				/*
617 				 * this keeps pressure off of the process memory
618 				 */
619 				if ((vm_swap_size == 0) ||
620 					(cnt.v_free_count < cnt.v_free_min)) {
621 					if (m->dirty == 0)
622 						vm_page_cache(m);
623 					else
624 						vm_page_deactivate(m);
625 				}
626 			} else if ((m->hold_count == 0) &&
627 				((m->flags & PG_BUSY) == 0) &&
628 				(m->busy == 0)) {
629 				vm_page_protect(m, VM_PROT_NONE);
630 				vm_page_free(m);
631 			}
632 		}
633 	}
634 	bufspace -= bp->b_bufsize;
635 	vmiospace -= bp->b_bufsize;
636 	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
637 	bp->b_npages = 0;
638 	bp->b_bufsize = 0;
639 	bp->b_flags &= ~B_VMIO;
640 	if (bp->b_vp)
641 		brelvp(bp);
642 }
643 
644 /*
645  * Check to see if a block is currently memory resident.  Internal
 * version of incore(); the caller must already be at splbio().
646  */
647 __inline struct buf *
648 gbincore(struct vnode * vp, daddr_t blkno)
649 {
650 	struct buf *bp;
651 	struct bufhashhdr *bh;
652 
653 	bh = BUFHASH(vp, blkno);
654 	bp = bh->lh_first;
655 
656 	/* Search hash chain */
657 	while (bp != NULL) {
658 		/* hit */
659 		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
660 		    (bp->b_flags & B_INVAL) == 0) {
661 			break;
662 		}
663 		bp = bp->b_hash.le_next;
664 	}
665 	return (bp);
666 }
667 
668 /*
669  * This routine implements clustered async writes for
670  * clearing out B_DELWRI buffers...  This is much better
671  * than the old way of writing only one buffer at a time.
672  */
673 int
674 vfs_bio_awrite(struct buf * bp)
675 {
676 	int i;
677 	daddr_t lblkno = bp->b_lblkno;
678 	struct vnode *vp = bp->b_vp;
679 	int s;
680 	int ncl;
681 	struct buf *bpa;
682 	int nwritten;
683 
684 	s = splbio();
685 	/*
686 	 * right now we support clustered writing only to regular files
687 	 */
688 	if ((vp->v_type == VREG) &&
689 	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
690 	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
691 		int size;
692 		int maxcl;
693 
694 		size = vp->v_mount->mnt_stat.f_iosize;
695 		maxcl = MAXPHYS / size;
696 
697 		for (i = 1; i < maxcl; i++) {
698 			if ((bpa = gbincore(vp, lblkno + i)) &&
699 			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
700 			    (B_DELWRI | B_CLUSTEROK)) &&
701 			    (bpa->b_bufsize == size)) {
702 				if ((bpa->b_blkno == bpa->b_lblkno) ||
703 				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
704 					break;
705 			} else {
706 				break;
707 			}
708 		}
709 		ncl = i;
710 		/*
711 		 * this is a possible cluster write
712 		 */
713 		if (ncl != 1) {
714 			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
715 			splx(s);
716 			return nwritten;
717 		}
718 	}
719 	bremfree(bp);
720 	splx(s);
721 	/*
722 	 * default (old) behavior, writing out only one block
723 	 */
724 	bp->b_flags |= B_BUSY | B_ASYNC;
725 	nwritten = bp->b_bufsize;
726 	(void) VOP_BWRITE(bp);
727 	return nwritten;
728 }
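/*
 * Worked example of the contiguity test above, assuming an 8K-block
 * filesystem (size = 8192) and DEV_BSHIFT = 9: each logical block
 * covers (8192 >> 9) = 16 device blocks, so buffer i can only extend
 * the cluster when bpa->b_blkno == bp->b_blkno + i * 16.
 */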
729 
730 
731 /*
732  * Find a buffer header which is available for use.
733  */
734 static struct buf *
735 getnewbuf(int slpflag, int slptimeo, int doingvmio)
736 {
737 	struct buf *bp;
738 	int s;
739 	int nbyteswritten = 0;
740 
741 start:
742 	if (bufspace >= maxbufspace)
743 		goto trytofreespace;
744 
745 	/* can we constitute a new buffer? */
746 	if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) {
747 		if (bp->b_qindex != QUEUE_EMPTY)
748 			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
749 			    bp->b_qindex);
750 		bp->b_flags |= B_BUSY;
751 		bremfree(bp);
752 		goto fillbuf;
753 	}
754 trytofreespace:
755 	/*
756 	 * We keep the file I/O from hogging metadata I/O.
757 	 * This is desirable because file data is cached in the
758 	 * VM/Buffer cache even if a buffer is freed.
759 	 */
760 	if ((bp = bufqueues[QUEUE_AGE].tqh_first)) {
761 		if (bp->b_qindex != QUEUE_AGE)
762 			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
763 			    bp->b_qindex);
764 	} else if ((bp = bufqueues[QUEUE_LRU].tqh_first)) {
765 		if (bp->b_qindex != QUEUE_LRU)
766 			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
767 			    bp->b_qindex);
768 	}
769 	if (!bp) {
770 		/* wait for a free buffer of any kind */
771 		needsbuffer = 1;
772 		tsleep(&needsbuffer,
773 			(PRIBIO + 1) | slpflag, "newbuf", slptimeo);
774 		return (0);
775 	}
776 
777 	/*
778 	 * We are fairly aggressive about freeing VMIO buffers, but since
779 	 * the buffering is intact without buffer headers, there is not
780 	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
781 	 */
782 	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
783 		if ((bp->b_flags & B_VMIO) == 0 ||
784 			(vmiospace < maxvmiobufspace)) {
785 			--bp->b_usecount;
786 			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
787 			if (bufqueues[QUEUE_LRU].tqh_first != NULL) {
788 				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
789 				goto start;
790 			}
791 			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
792 		}
793 	}
794 
795 	/* if we are a delayed write, convert to an async write */
796 	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
797 		nbyteswritten += vfs_bio_awrite(bp);
798 		if (!slpflag && !slptimeo) {
799 			return (0);
800 		}
801 		goto start;
802 	}
803 
804 	if (bp->b_flags & B_WANTED) {
805 		bp->b_flags &= ~B_WANTED;
806 		wakeup(bp);
807 	}
808 	bremfree(bp);
809 	bp->b_flags |= B_BUSY;
810 
811 	if (bp->b_flags & B_VMIO)
812 		vfs_vmio_release(bp);
813 
814 	if (bp->b_vp)
815 		brelvp(bp);
816 
817 fillbuf:
818 	/* we are not free, nor do we contain interesting data */
819 	if (bp->b_rcred != NOCRED) {
820 		crfree(bp->b_rcred);
821 		bp->b_rcred = NOCRED;
822 	}
823 	if (bp->b_wcred != NOCRED) {
824 		crfree(bp->b_wcred);
825 		bp->b_wcred = NOCRED;
826 	}
827 
828 	LIST_REMOVE(bp, b_hash);
829 	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
830 	if (bp->b_bufsize) {
831 		allocbuf(bp, 0);
832 	}
833 	bp->b_flags = B_BUSY;
834 	bp->b_dev = NODEV;
835 	bp->b_vp = NULL;
836 	bp->b_blkno = bp->b_lblkno = 0;
837 	bp->b_iodone = 0;
838 	bp->b_error = 0;
839 	bp->b_resid = 0;
840 	bp->b_bcount = 0;
841 	bp->b_npages = 0;
842 	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
843 	bp->b_dirtyoff = bp->b_dirtyend = 0;
844 	bp->b_validoff = bp->b_validend = 0;
845 	bp->b_usecount = 2;
846 	if (bufspace >= maxbufspace + nbyteswritten) {
847 		bp->b_flags |= B_INVAL;
848 		brelse(bp);
849 		goto trytofreespace;
850 	}
851 	return (bp);
852 }
853 
854 /*
855  * Check to see if a block is currently memory resident.
856  */
857 struct buf *
858 incore(struct vnode * vp, daddr_t blkno)
859 {
860 	struct buf *bp;
861 	struct bufhashhdr *bh;
862 
863 	int s = splbio();
864 	bp = gbincore(vp, blkno);
865 	splx(s);
866 	return (bp);
867 }
868 
869 /*
870  * Returns true if no I/O is needed to access the
871  * associated VM object.  This is like incore except
872  * it also hunts around in the VM system for the data.
873  */
874 
875 int
876 inmem(struct vnode * vp, daddr_t blkno)
877 {
878 	vm_object_t obj;
879 	vm_offset_t toff, tinc;
880 	vm_page_t m;
881 	vm_ooffset_t off;
882 
883 	if (incore(vp, blkno))
884 		return 1;
885 	if (vp->v_mount == NULL)
886 		return 0;
887 	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
888 		return 0;
889 
890 	obj = vp->v_object;
891 	tinc = PAGE_SIZE;
892 	if (tinc > vp->v_mount->mnt_stat.f_iosize)
893 		tinc = vp->v_mount->mnt_stat.f_iosize;
894 	off = blkno * vp->v_mount->mnt_stat.f_iosize;
895 
896 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
897 
898 		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
899 		if (!m)
900 			return 0;
901 		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
902 			return 0;
903 	}
904 	return 1;
905 }
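/*
 * Example of the scan above (assuming PAGE_SIZE = 4096): with
 * f_iosize = 8192 the loop probes the two pages covering the block,
 * while with f_iosize = 512 it makes a single vm_page_is_valid() check
 * for just the 512 bytes the block occupies within its page.
 */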
906 
907 /*
908  * Now we set the dirty range for the buffer --
909  * for NFS -- if the file is mapped and pages have
910  * been written to, let it know.  We want the
911  * entire range of the buffer to be marked dirty if
912  * any of the pages have been written to, for consistency
913  * with the b_validoff, b_validend set in the NFS write
914  * code and used by the NFS read code.
915  */
916 static void
917 vfs_setdirty(struct buf *bp) {
918 	int i;
919 	vm_object_t object;
920 	vm_offset_t boffset, offset;
921 	/*
922 	 * We qualify the scan for modified pages on whether the
923 	 * object has been flushed yet.  The OBJ_WRITEABLE flag
924 	 * is not cleared simply by protecting pages off.
925 	 */
926 	if ((bp->b_flags & B_VMIO) &&
927 		((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
928 		/*
929 		 * test the pages to see if they have been modified directly
930 		 * by users through the VM system.
931 		 */
932 		for (i = 0; i < bp->b_npages; i++)
933 			vm_page_test_dirty(bp->b_pages[i]);
934 
935 		/*
936 		 * scan forwards for the first page modified
937 		 */
938 		for (i = 0; i < bp->b_npages; i++) {
939 			if (bp->b_pages[i]->dirty) {
940 				break;
941 			}
942 		}
943 		boffset = (i << PAGE_SHIFT);
944 		if (boffset < bp->b_dirtyoff) {
945 			bp->b_dirtyoff = boffset;
946 		}
947 
948 		/*
949 		 * scan backwards for the last page modified
950 		 */
951 		for (i = bp->b_npages - 1; i >= 0; --i) {
952 			if (bp->b_pages[i]->dirty) {
953 				break;
954 			}
955 		}
956 		boffset = (i + 1);
957 		offset = boffset + bp->b_pages[0]->pindex;
958 		if (offset >= object->size)
959 			boffset = object->size - bp->b_pages[0]->pindex;
960 		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
961 			bp->b_dirtyend = (boffset << PAGE_SHIFT);
962 	}
963 }
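/*
 * Worked example for vfs_setdirty() (assuming PAGE_SIZE = 4096 and a
 * four-page buffer in which only pages 1 and 2 were dirtied through a
 * mapping): the forward scan stops at i = 1, so b_dirtyoff becomes
 * 4096; the backward scan stops at i = 2, so b_dirtyend becomes
 * 3 * 4096 = 12288.
 */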
964 
965 /*
966  * Get a block given a specified block and offset into a file/device.
967  */
968 struct buf *
969 getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
970 {
971 	struct buf *bp;
972 	int s;
973 	struct bufhashhdr *bh;
974 
975 	s = splbio();
976 loop:
977 	if ((bp = gbincore(vp, blkno))) {
978 		if (bp->b_flags & B_BUSY) {
979 			bp->b_flags |= B_WANTED;
980 			if (bp->b_usecount < BUF_MAXUSE)
981 				++bp->b_usecount;
982 			if (!tsleep(bp,
983 				(PRIBIO + 1) | slpflag, "getblk", slptimeo))
984 				goto loop;
985 
986 			splx(s);
987 			return (struct buf *) NULL;
988 		}
989 		bp->b_flags |= B_BUSY | B_CACHE;
990 		bremfree(bp);
991 
992 		/*
993 		 * check for size inconsistencies (note that they shouldn't happen
994 		 * but do when filesystems don't handle the size changes correctly.)
995 		 * We are conservative on metadata and don't just extend the buffer
996 		 * but write and re-constitute it.
997 		 */
998 
999 		if (bp->b_bcount != size) {
1000 			if (bp->b_flags & B_VMIO) {
1001 				allocbuf(bp, size);
1002 			} else {
1003 				bp->b_flags |= B_NOCACHE;
1004 				VOP_BWRITE(bp);
1005 				goto loop;
1006 			}
1007 		}
1008 
1009 		if (bp->b_usecount < BUF_MAXUSE)
1010 			++bp->b_usecount;
1011 		splx(s);
1012 		return (bp);
1013 	} else {
1014 		vm_object_t obj;
1015 		int doingvmio;
1016 
1017 		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
1018 			doingvmio = 1;
1019 		} else {
1020 			doingvmio = 0;
1021 		}
1022 		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
1023 			if (slpflag || slptimeo) {
1024 				splx(s);
1025 				return NULL;
1026 			}
1027 			goto loop;
1028 		}
1029 
1030 		/*
1031 		 * This code is used to make sure that a buffer is not
1032 		 * created while the getnewbuf routine is blocked.
1033 		 * Normally the vnode is locked so this isn't a problem.
1034 		 * VBLK type I/O requests, however, don't lock the vnode.
1035 		 */
1036 		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
1037 			bp->b_flags |= B_INVAL;
1038 			brelse(bp);
1039 			goto loop;
1040 		}
1041 
1042 		/*
1043 		 * Insert the buffer into the hash, so that it can
1044 		 * be found by incore.
1045 		 */
1046 		bp->b_blkno = bp->b_lblkno = blkno;
1047 		bgetvp(vp, bp);
1048 		LIST_REMOVE(bp, b_hash);
1049 		bh = BUFHASH(vp, blkno);
1050 		LIST_INSERT_HEAD(bh, bp, b_hash);
1051 
1052 		if (doingvmio) {
1053 			bp->b_flags |= (B_VMIO | B_CACHE);
1054 #if defined(VFS_BIO_DEBUG)
1055 			if (vp->v_type != VREG && vp->v_type != VBLK)
1056 				printf("getblk: vmioing file type %d???\n", vp->v_type);
1057 #endif
1058 		} else {
1059 			bp->b_flags &= ~B_VMIO;
1060 		}
1061 		splx(s);
1062 
1063 		allocbuf(bp, size);
1064 		return (bp);
1065 	}
1066 }
1067 
1068 /*
1069  * Get an empty, disassociated buffer of given size.
1070  */
1071 struct buf *
1072 geteblk(int size)
1073 {
1074 	struct buf *bp;
1075 	int s;
1076 
1077 	s = splbio();
1078 	while ((bp = getnewbuf(0, 0, 0)) == 0);
1079 	splx(s);
1080 	allocbuf(bp, size);
1081 	bp->b_flags |= B_INVAL;
1082 	return (bp);
1083 }
1084 
1085 
1086 /*
1087  * This code constitutes the buffer memory from either anonymous system
1088  * memory (in the case of non-VMIO operations) or from an associated
1089  * VM object (in the case of VMIO operations).
1090  *
1091  * Note that this code is tricky, and has many complications to resolve
1092  * deadlock or inconsistent data situations.  Tread lightly!!!
1093  *
1094  * Modify the length of a buffer's underlying buffer storage without
1095  * destroying information (unless, of course the buffer is shrinking).
1096  */
1097 int
1098 allocbuf(struct buf * bp, int size)
1099 {
1100 
1101 	int s;
1102 	int newbsize, mbsize;
1103 	int i;
1104 
1105 	if (!(bp->b_flags & B_BUSY))
1106 		panic("allocbuf: buffer not busy");
1107 
1108 	if ((bp->b_flags & B_VMIO) == 0) {
1109 		caddr_t origbuf;
1110 		int origbufsize;
1111 		/*
1112 		 * Just get anonymous memory from the kernel
1113 		 */
1114 		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1115 		if (bp->b_flags & B_MALLOC)
1116 			newbsize = mbsize;
1117 		else
1118 			newbsize = round_page(size);
1119 
1120 		if (newbsize < bp->b_bufsize) {
1121 			/*
1122 			 * malloced buffers are not shrunk
1123 			 */
1124 			if (bp->b_flags & B_MALLOC) {
1125 				if (newbsize) {
1126 					bp->b_bcount = size;
1127 				} else {
1128 					free(bp->b_data, M_TEMP);
1129 					bufspace -= bp->b_bufsize;
1130 					bufmallocspace -= bp->b_bufsize;
1131 					bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
1132 					bp->b_bufsize = 0;
1133 					bp->b_bcount = 0;
1134 					bp->b_flags &= ~B_MALLOC;
1135 				}
1136 				return 1;
1137 			}
1138 			vm_hold_free_pages(
1139 			    bp,
1140 			    (vm_offset_t) bp->b_data + newbsize,
1141 			    (vm_offset_t) bp->b_data + bp->b_bufsize);
1142 		} else if (newbsize > bp->b_bufsize) {
1143 			/*
1144 			 * We only use malloced memory on the first allocation,
1145 			 * and revert to page-allocated memory when the buffer grows.
1146 			 */
1147 			if ( (bufmallocspace < maxbufmallocspace) &&
1148 				(bp->b_bufsize == 0) &&
1149 				(mbsize <= PAGE_SIZE/2)) {
1150 
1151 				bp->b_data = malloc(mbsize, M_TEMP, M_WAITOK);
1152 				bp->b_bufsize = mbsize;
1153 				bp->b_bcount = size;
1154 				bp->b_flags |= B_MALLOC;
1155 				bufspace += mbsize;
1156 				bufmallocspace += mbsize;
1157 				return 1;
1158 			}
1159 			origbuf = NULL;
1160 			origbufsize = 0;
1161 			/*
1162 			 * If the buffer is growing on its other-than-first allocation,
1163 			 * then we revert to the page-allocation scheme.
1164 			 */
1165 			if (bp->b_flags & B_MALLOC) {
1166 				origbuf = bp->b_data;
1167 				origbufsize = bp->b_bufsize;
1168 				bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
1169 				bufspace -= bp->b_bufsize;
1170 				bufmallocspace -= bp->b_bufsize;
1171 				bp->b_bufsize = 0;
1172 				bp->b_flags &= ~B_MALLOC;
1173 				newbsize = round_page(newbsize);
1174 			}
1175 			vm_hold_load_pages(
1176 			    bp,
1177 			    (vm_offset_t) bp->b_data + bp->b_bufsize,
1178 			    (vm_offset_t) bp->b_data + newbsize);
1179 			if (origbuf) {
1180 				bcopy(origbuf, bp->b_data, origbufsize);
1181 				free(origbuf, M_TEMP);
1182 			}
1183 		}
1184 	} else {
1185 		vm_page_t m;
1186 		int desiredpages;
1187 
1188 		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
1189 		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);
1190 
1191 		if (bp->b_flags & B_MALLOC)
1192 			panic("allocbuf: VMIO buffer can't be malloced");
1193 
1194 		if (newbsize < bp->b_bufsize) {
1195 			if (desiredpages < bp->b_npages) {
1196 				for (i = desiredpages; i < bp->b_npages; i++) {
1197 					/*
1198 					 * the page is not freed here -- it
1199 					 * is the responsibility of vnode_pager_setsize
1200 					 */
1201 					m = bp->b_pages[i];
1202 					s = splhigh();
1203 					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
1204 						m->flags |= PG_WANTED;
1205 						tsleep(m, PVM, "biodep", 0);
1206 					}
1207 					splx(s);
1208 
1209 					bp->b_pages[i] = NULL;
1210 					vm_page_unwire(m);
1211 				}
1212 				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
1213 				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
1214 				bp->b_npages = desiredpages;
1215 			}
1216 		} else if (newbsize > bp->b_bufsize) {
1217 			vm_object_t obj;
1218 			vm_offset_t tinc, toff;
1219 			vm_ooffset_t off;
1220 			vm_pindex_t objoff;
1221 			int pageindex, curbpnpages;
1222 			struct vnode *vp;
1223 			int bsize;
1224 
1225 			vp = bp->b_vp;
1226 
1227 			if (vp->v_type == VBLK)
1228 				bsize = DEV_BSIZE;
1229 			else
1230 				bsize = vp->v_mount->mnt_stat.f_iosize;
1231 
1232 			if (bp->b_npages < desiredpages) {
1233 				obj = vp->v_object;
1234 				tinc = PAGE_SIZE;
1235 				if (tinc > bsize)
1236 					tinc = bsize;
1237 				off = (vm_ooffset_t) bp->b_lblkno * bsize;
1238 		doretry:
1239 				curbpnpages = bp->b_npages;
1240 				bp->b_flags |= B_CACHE;
1241 				for (toff = 0; toff < newbsize; toff += tinc) {
1242 					int bytesinpage;
1243 
1244 					pageindex = toff >> PAGE_SHIFT;
1245 					objoff = OFF_TO_IDX(off + toff);
1246 					if (pageindex < curbpnpages) {
1247 
1248 						m = bp->b_pages[pageindex];
1249 #ifdef VFS_BIO_DIAG
1250 						if (m->pindex != objoff)
1251 							panic("allocbuf: page changed offset??!!!?");
1252 #endif
1253 						bytesinpage = tinc;
1254 						if (tinc > (newbsize - toff))
1255 							bytesinpage = newbsize - toff;
1256 						if ((bp->b_flags & B_CACHE) &&
1257 							!vm_page_is_valid(m,
1258 							(vm_offset_t) ((toff + off) & (PAGE_SIZE - 1)),
1259 							bytesinpage)) {
1260 							bp->b_flags &= ~B_CACHE;
1261 						}
1262 						continue;
1263 					}
1264 					m = vm_page_lookup(obj, objoff);
1265 					if (!m) {
1266 						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
1267 						if (!m) {
1268 							VM_WAIT;
1269 							goto doretry;
1270 						}
1271 						/*
1272 						 * Normally it is unwise to clear PG_BUSY without
1273 						 * PAGE_WAKEUP -- but it is okay here, as there is
1274 						 * no chance for blocking between here and vm_page_alloc
1275 						 */
1276 						m->flags &= ~PG_BUSY;
1277 						vm_page_wire(m);
1278 						bp->b_flags &= ~B_CACHE;
1279 					} else if (m->flags & PG_BUSY) {
1280 
1281 						s = splhigh();
1282 						m->flags |= PG_WANTED;
1283 						tsleep(m, PVM, "pgtblk", 0);
1284 						splx(s);
1285 
1286 						goto doretry;
1287 					} else {
1288 						if ((curproc != pageproc) &&
1289 							(m->queue == PQ_CACHE) &&
1290 						    ((cnt.v_free_count + cnt.v_cache_count) <
1291 								(cnt.v_free_min + cnt.v_cache_min))) {
1292 							pagedaemon_wakeup();
1293 						}
1294 						bytesinpage = tinc;
1295 						if (tinc > (newbsize - toff))
1296 							bytesinpage = newbsize - toff;
1297 						if ((bp->b_flags & B_CACHE) &&
1298 							!vm_page_is_valid(m,
1299 							(vm_offset_t) ((toff + off) & (PAGE_SIZE - 1)),
1300 							bytesinpage)) {
1301 							bp->b_flags &= ~B_CACHE;
1302 						}
1303 						vm_page_wire(m);
1304 					}
1305 					bp->b_pages[pageindex] = m;
1306 					curbpnpages = pageindex + 1;
1307 				}
1308 				bp->b_data = (caddr_t) trunc_page(bp->b_data);
1309 				bp->b_npages = curbpnpages;
1310 				pmap_qenter((vm_offset_t) bp->b_data,
1311 					bp->b_pages, bp->b_npages);
1312 				((vm_offset_t) bp->b_data) |= off & (PAGE_SIZE - 1);
1313 			}
1314 		}
1315 	}
1316 	if (bp->b_flags & B_VMIO)
1317 		vmiospace += bp->b_bufsize;
1318 	bufspace += (newbsize - bp->b_bufsize);
1319 	bp->b_bufsize = newbsize;
1320 	bp->b_bcount = size;
1321 	return 1;
1322 }
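/*
 * Worked example for the non-VMIO sizing above (assuming
 * DEV_BSIZE = 512, PAGE_SIZE = 4096 and malloc space still available):
 * a first allocbuf(bp, 600) rounds mbsize up to 1024, which is at most
 * PAGE_SIZE/2, so the storage is simply malloced; a later
 * allocbuf(bp, 3000) grows the buffer, so the 1024 malloced bytes are
 * copied into page-backed storage of round_page size and freed.
 */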
1323 
1324 /*
1325  * Wait for buffer I/O completion, returning error status.
1326  */
1327 int
1328 biowait(register struct buf * bp)
1329 {
1330 	int s;
1331 
1332 	s = splbio();
1333 	while ((bp->b_flags & B_DONE) == 0)
1334 		tsleep(bp, PRIBIO, "biowait", 0);
1335 	splx(s);
1336 	if (bp->b_flags & B_EINTR) {
1337 		bp->b_flags &= ~B_EINTR;
1338 		return (EINTR);
1339 	}
1340 	if (bp->b_flags & B_ERROR) {
1341 		return (bp->b_error ? bp->b_error : EIO);
1342 	} else {
1343 		return (0);
1344 	}
1345 }
1346 
1347 /*
1348  * Finish I/O on a buffer, calling an optional function.
1349  * This is usually called from interrupt level, so process blocking
1350  * is not *a good idea*.
1351  */
1352 void
1353 biodone(register struct buf * bp)
1354 {
1355 	int s;
1356 
1357 	s = splbio();
1358 	if (!(bp->b_flags & B_BUSY))
1359 		panic("biodone: buffer not busy");
1360 
1361 	if (bp->b_flags & B_DONE) {
1362 		splx(s);
1363 		printf("biodone: buffer already done\n");
1364 		return;
1365 	}
1366 	bp->b_flags |= B_DONE;
1367 
1368 	if ((bp->b_flags & B_READ) == 0) {
1369 		vwakeup(bp);
1370 	}
1371 #ifdef BOUNCE_BUFFERS
1372 	if (bp->b_flags & B_BOUNCE)
1373 		vm_bounce_free(bp);
1374 #endif
1375 
1376 	/* call optional completion function if requested */
1377 	if (bp->b_flags & B_CALL) {
1378 		bp->b_flags &= ~B_CALL;
1379 		(*bp->b_iodone) (bp);
1380 		splx(s);
1381 		return;
1382 	}
1383 	if (bp->b_flags & B_VMIO) {
1384 		int i, resid;
1385 		vm_ooffset_t foff;
1386 		vm_page_t m;
1387 		vm_object_t obj;
1388 		int iosize;
1389 		struct vnode *vp = bp->b_vp;
1390 
1391 		if (vp->v_type == VBLK)
1392 			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
1393 		else
1394 			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1395 		obj = vp->v_object;
1396 		if (!obj) {
1397 			panic("biodone: no object");
1398 		}
1399 #if defined(VFS_BIO_DEBUG)
1400 		if (obj->paging_in_progress < bp->b_npages) {
1401 			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
1402 			    obj->paging_in_progress, bp->b_npages);
1403 		}
1404 #endif
1405 		iosize = bp->b_bufsize;
1406 		for (i = 0; i < bp->b_npages; i++) {
1407 			int bogusflag = 0;
1408 			m = bp->b_pages[i];
1409 			if (m == bogus_page) {
1410 				bogusflag = 1;
1411 				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
1412 				if (!m) {
1413 #if defined(VFS_BIO_DEBUG)
1414 					printf("biodone: page disappeared\n");
1415 #endif
1416 					--obj->paging_in_progress;
1417 					continue;
1418 				}
1419 				bp->b_pages[i] = m;
1420 				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1421 			}
1422 #if defined(VFS_BIO_DEBUG)
1423 			if (OFF_TO_IDX(foff) != m->pindex) {
1424 				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
1425 			}
1426 #endif
1427 			resid = IDX_TO_OFF(m->pindex + 1) - foff;
1428 			if (resid > iosize)
1429 				resid = iosize;
1430 			/*
1431 			 * In the write case, the valid and clean bits are
1432 			 * already changed correctly, so we only need to do this
1433 			 * here in the read case.
1434 			 */
1435 			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
1436 				vm_page_set_validclean(m,
1437 					(vm_offset_t) (foff & (PAGE_SIZE-1)), resid);
1438 			}
1439 
1440 			/*
1441 			 * When debugging new filesystems or buffer I/O methods, this
1442 			 * is the most common error that pops up.  If you see this, you
1443 			 * have not set the page busy flag correctly!!!
1444 			 */
1445 			if (m->busy == 0) {
1446 				printf("biodone: page busy < 0, "
1447 				    "pindex: %d, foff: 0x(%x,%x), "
1448 				    "resid: %d, index: %d\n",
1449 				    (int) m->pindex, (int)(foff >> 32),
1450 						(int) foff & 0xffffffff, resid, i);
1451 				if (vp->v_type != VBLK)
1452 					printf(" iosize: %d, lblkno: %d, flags: 0x%lx, npages: %d\n",
1453 					    bp->b_vp->v_mount->mnt_stat.f_iosize,
1454 					    (int) bp->b_lblkno,
1455 					    bp->b_flags, bp->b_npages);
1456 				else
1457 					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
1458 					    (int) bp->b_lblkno,
1459 					    bp->b_flags, bp->b_npages);
1460 				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
1461 				    m->valid, m->dirty, m->wire_count);
1462 				panic("biodone: page busy < 0\n");
1463 			}
1464 			--m->busy;
1465 			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
1466 				m->flags &= ~PG_WANTED;
1467 				wakeup(m);
1468 			}
1469 			--obj->paging_in_progress;
1470 			foff += resid;
1471 			iosize -= resid;
1472 		}
1473 		if (obj && obj->paging_in_progress == 0 &&
1474 		    (obj->flags & OBJ_PIPWNT)) {
1475 			obj->flags &= ~OBJ_PIPWNT;
1476 			wakeup(obj);
1477 		}
1478 	}
1479 	/*
1480 	 * For asynchronous completions, release the buffer now. The brelse
1481 	 * checks for B_WANTED and will do the wakeup there if necessary - so
1482 	 * no need to do a wakeup here in the async case.
1483 	 */
1484 
1485 	if (bp->b_flags & B_ASYNC) {
1486 		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
1487 			brelse(bp);
1488 		else
1489 			bqrelse(bp);
1490 	} else {
1491 		wakeup(bp);
1492 	}
1493 	splx(s);
1494 }
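/*
 * Sketch of the B_CALL hook handled above (hypothetical caller with a
 * completion routine my_done): biodone() will invoke the function
 * instead of performing the normal buffer release.
 */
#if 0
	bp->b_flags |= B_CALL | B_ASYNC;
	bp->b_iodone = my_done;		/* void my_done(struct buf *); */
	VOP_STRATEGY(bp);
#endif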
1495 
1496 int
1497 count_lock_queue()
1498 {
1499 	int count;
1500 	struct buf *bp;
1501 
1502 	count = 0;
1503 	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
1504 	    bp != NULL;
1505 	    bp = bp->b_freelist.tqe_next)
1506 		count++;
1507 	return (count);
1508 }
1509 
1510 int vfs_update_interval = 30;
1511 
1512 static void
1513 vfs_update()
1514 {
1515 	(void) spl0();		/* XXX redundant?  wrong place? */
1516 	while (1) {
1517 		tsleep(&vfs_update_wakeup, PUSER, "update",
1518 		    hz * vfs_update_interval);
1519 		vfs_update_wakeup = 0;
1520 		sync(curproc, NULL, NULL);
1521 	}
1522 }
1523 
1524 static int
1525 sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
1526 {
1527 	int error = sysctl_handle_int(oidp,
1528 		oidp->oid_arg1, oidp->oid_arg2, req);
1529 	if (!error)
1530 		wakeup(&vfs_update_wakeup);
1531 	return error;
1532 }
1533 
1534 SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
1535 	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");
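/*
 * The interval above is tunable at run time, e.g. from userland:
 *	sysctl -w kern.update=30
 * Setting it also wakes the update daemon immediately via the handler.
 */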
1536 
1537 
1538 /*
1539  * This routine is called in lieu of iodone in the case of
1540  * incomplete I/O.  This keeps the busy status for pages
1541  * consistent.
1542  */
1543 void
1544 vfs_unbusy_pages(struct buf * bp)
1545 {
1546 	int i;
1547 
1548 	if (bp->b_flags & B_VMIO) {
1549 		struct vnode *vp = bp->b_vp;
1550 		vm_object_t obj = vp->v_object;
1551 		vm_ooffset_t foff;
1552 
1553 		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1554 
1555 		for (i = 0; i < bp->b_npages; i++) {
1556 			vm_page_t m = bp->b_pages[i];
1557 
1558 			if (m == bogus_page) {
1559 				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
1560 				if (!m) {
1561 					panic("vfs_unbusy_pages: page missing\n");
1562 				}
1563 				bp->b_pages[i] = m;
1564 				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1565 			}
1566 			--obj->paging_in_progress;
1567 			--m->busy;
1568 			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
1569 				m->flags &= ~PG_WANTED;
1570 				wakeup(m);
1571 			}
1572 		}
1573 		if (obj->paging_in_progress == 0 &&
1574 		    (obj->flags & OBJ_PIPWNT)) {
1575 			obj->flags &= ~OBJ_PIPWNT;
1576 			wakeup(obj);
1577 		}
1578 	}
1579 }
1580 
1581 /*
1582  * This routine is called before a device strategy routine.
1583  * It is used to tell the VM system that paging I/O is in
1584  * progress, and treat the pages associated with the buffer
1585  * almost as being PG_BUSY.  Also the object paging_in_progress
1586  * flag is handled to make sure that the object doesn't become
1587  * inconsistent.
1588  */
1589 void
1590 vfs_busy_pages(struct buf * bp, int clear_modify)
1591 {
1592 	int i;
1593 
1594 	if (bp->b_flags & B_VMIO) {
1595 		vm_object_t obj = bp->b_vp->v_object;
1596 		vm_ooffset_t foff;
1597 		int iocount = bp->b_bufsize;
1598 
1599 		if (bp->b_vp->v_type == VBLK)
1600 			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
1601 		else
1602 			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1603 		vfs_setdirty(bp);
1604 		for (i = 0; i < bp->b_npages; i++) {
1605 			vm_page_t m = bp->b_pages[i];
1606 			int resid = IDX_TO_OFF(m->pindex + 1) - foff;
1607 
1608 			if (resid > iocount)
1609 				resid = iocount;
1610 			if ((bp->b_flags & B_CLUSTER) == 0) {
1611 				obj->paging_in_progress++;
1612 				m->busy++;
1613 			}
1614 			if (clear_modify) {
1615 				vm_page_protect(m, VM_PROT_READ);
1616 				vm_page_set_validclean(m,
1617 					(vm_offset_t) (foff & (PAGE_SIZE-1)), resid);
1618 			} else if (bp->b_bcount >= PAGE_SIZE) {
1619 				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
1620 					bp->b_pages[i] = bogus_page;
1621 					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1622 				}
1623 			}
1624 			foff += resid;
1625 			iocount -= resid;
1626 		}
1627 	}
1628 }
1629 
1630 /*
1631  * Tell the VM system that the pages associated with this buffer
1632  * are clean.  This is used for delayed writes where the data is
1633  * going to go to disk eventually without additional VM intervention.
1634  */
1635 void
1636 vfs_clean_pages(struct buf * bp)
1637 {
1638 	int i;
1639 
1640 	if (bp->b_flags & B_VMIO) {
1641 		vm_ooffset_t foff;
1642 		int iocount = bp->b_bufsize;
1643 
1644 		if (bp->b_vp->v_type == VBLK)
1645 			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
1646 		else
1647 			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
1648 
1649 		for (i = 0; i < bp->b_npages; i++) {
1650 			vm_page_t m = bp->b_pages[i];
1651 			int resid = IDX_TO_OFF(m->pindex + 1) - foff;
1652 
1653 			if (resid > iocount)
1654 				resid = iocount;
1655 			if (resid > 0) {
1656 				vm_page_set_validclean(m,
1657 					((vm_offset_t) foff & (PAGE_SIZE-1)), resid);
1658 			}
1659 			foff += resid;
1660 			iocount -= resid;
1661 		}
1662 	}
1663 }
1664 
1665 void
1666 vfs_bio_clrbuf(struct buf *bp) {
1667 	int i;
1668 	int remapbuffer = 0;
1669 	if( bp->b_flags & B_VMIO) {
1670 		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
1671 			int mask;
1672 			mask = 0;
1673 			for(i=0;i<bp->b_bufsize;i+=DEV_BSIZE)
1674 				mask |= (1 << (i/DEV_BSIZE));
1675 			if( bp->b_pages[0]->valid != mask) {
1676 				bzero(bp->b_data, bp->b_bufsize);
1677 			}
1678 			bp->b_pages[0]->valid = mask;
1679 			bp->b_resid = 0;
1680 			return;
1681 		}
1682 		for(i=0;i<bp->b_npages;i++) {
1683 			if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
1684 				continue;
1685 			if( bp->b_pages[i]->valid == 0) {
1686 				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
1687 					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
1688 				}
1689 			} else {
1690 				int j;
1691 				for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) {
1692 					if( (bp->b_pages[i]->valid & (1<<j)) == 0)
1693 						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
1694 				}
1695 			}
1696 			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
1697 		}
1698 		bp->b_resid = 0;
1699 	} else {
1700 		clrbuf(bp);
1701 	}
1702 	if (remapbuffer)
1703 			pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
1704 }
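/*
 * Worked example for the sub-page case above (assuming DEV_BSIZE = 512
 * and b_bufsize = 1024): the loop builds mask = 0x3, marking the first
 * two 512-byte chunks of the page; if the page's valid bits don't
 * already match, the buffer contents are zeroed and the mask is
 * recorded as the page's valid bits.
 */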
1705 
1706 /*
1707  * vm_hold_load_pages and vm_hold_free_pages get pages into
1708  * a buffer's address space.  The pages are anonymous and are
1709  * not associated with a file object.
1710  */
1711 void
1712 vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
1713 {
1714 	vm_offset_t pg;
1715 	vm_page_t p;
1716 	int index;
1717 
1718 	to = round_page(to);
1719 	from = round_page(from);
1720 	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;
1721 
1722 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
1723 
1724 tryagain:
1725 
1726 		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
1727 		    VM_ALLOC_NORMAL);
1728 		if (!p) {
1729 			VM_WAIT;
1730 			goto tryagain;
1731 		}
1732 		vm_page_wire(p);
1733 		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
1734 		bp->b_pages[index] = p;
1735 		PAGE_WAKEUP(p);
1736 	}
1737 	bp->b_npages = to >> PAGE_SHIFT;
1738 }
1739 
1740 void
1741 vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
1742 {
1743 	vm_offset_t pg;
1744 	vm_page_t p;
1745 	int index;
1746 
1747 	from = round_page(from);
1748 	to = round_page(to);
1749 	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;
1750 
1751 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
1752 		p = bp->b_pages[index];
1753 		if (p && (index < bp->b_npages)) {
1754 			if (p->busy) {
1755 				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
1756 					bp->b_blkno, bp->b_lblkno);
1757 			}
1758 			bp->b_pages[index] = NULL;
1759 			pmap_kremove(pg);
1760 			vm_page_unwire(p);
1761 			vm_page_free(p);
1762 		}
1763 	}
1764 	bp->b_npages = from >> PAGE_SHIFT;
1765 }
1766