xref: /freebsd/sys/kern/vfs_cluster.c (revision 48991a368427cadb9cdac39581d1676c29619c52)
/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $Id: vfs_cluster.c,v 1.27 1995/11/20 03:55:48 dyson Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#ifdef DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
int doreallocblks = 0;
SYSCTL_INT(_debug, 13, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");

#else
/* XXX for cluster_write */
#define doreallocblks 0
#endif

/*
 * Local declarations
 */
static struct buf *cluster_rbuild __P((struct vnode *, u_quad_t,
    daddr_t, daddr_t, long, int));
struct cluster_save *cluster_collectbufs __P((struct vnode *, struct buf *));

int totreads;
int totreadblocks;
extern vm_page_t bogus_page;

#ifdef DIAGNOSTIC
/*
 * Set to 1 if reads of block zero should cause readahead to be done.
 * Set to 0 to treat a read of block zero as a non-sequential read.
 *
 * Setting to one assumes that most reads of block zero of files are due to
 * sequential passes over the files (e.g. cat, sum) where additional blocks
 * will soon be needed.  Setting to zero assumes that the majority are
 * surgical strikes to get particular info (e.g. size, file) where readahead
 * blocks will not be used and, in fact, push out other potentially useful
 * blocks from the cache.  The former seems intuitive, but some quick tests
 * showed that the latter performed better from a system-wide point of view.
 */
int doclusterraz = 0;

#define ISSEQREAD(vp, blk) \
	(((blk) != 0 || doclusterraz) && \
	 ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#else
#define ISSEQREAD(vp, blk) \
	(/* (blk) != 0 && */ ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#endif

/*
 * Allow for three entire read-aheads...  The system will
 * adjust downwards rapidly if needed...
 */
#define RA_MULTIPLE_FAST	2
#define RA_MULTIPLE_SLOW	3
#define RA_SHIFTDOWN	1	/* approx lg2(RA_MULTIPLE_FAST) */
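
/*
 * Read-ahead bookkeeping (kept in the vnode): v_ralen is the current
 * read-ahead window in blocks and v_maxra is (roughly) one past the highest
 * block for which read-ahead has been issued.  cluster_read() below grows
 * v_ralen by one block per sequential access, up to roughly
 * RA_MULTIPLE_FAST or RA_MULTIPLE_SLOW times MAXPHYS worth of blocks, and
 * shrinks it by RA_SHIFTDOWN (a halving) when read-ahead overshoots or the
 * access pattern stops being sequential.
 */
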
/*
 * This replaces bread().  If this is a bread at the beginning of a file and
 * lastr is 0, we assume this is the first read and we'll read up to two
 * blocks if they are sequential.  After that, we'll do regular read ahead
 * in clustered chunks.
 *	bp is the block requested.
 *	rbp is the read-ahead block.
 *	If either is NULL, then you don't have to do the I/O.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp, *rbp;
	daddr_t blkno, rablkno, origlblkno;
	int error, num_ra, alreadyincore;
	int i;
	int seq;

	error = 0;
	/*
	 * get the requested block
	 */
	origlblkno = lblkno;
	*bpp = bp = getblk(vp, lblkno, size, 0, 0);
	seq = ISSEQREAD(vp, lblkno);
	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back off on prospective read-aheads.
	 */
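	/*
	 * Roughly: on a cache hit with a non-sequential access we shrink the
	 * read-ahead window and return; on a hit that is still inside the
	 * current read-ahead window we may simply grow v_ralen and return;
	 * otherwise we fall through and continue read-ahead from v_maxra
	 * (or from the next block, on a cache miss).
	 */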
	if (bp->b_flags & B_CACHE) {
		if (!seq) {
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			vp->v_ralen >>= RA_SHIFTDOWN;
			return 0;
		} else if (vp->v_maxra > lblkno) {
			if ((vp->v_maxra + (vp->v_ralen / RA_MULTIPLE_SLOW)) >=
			    (lblkno + vp->v_ralen)) {
				if ((vp->v_ralen + 1) < RA_MULTIPLE_FAST * (MAXPHYS / size))
					++vp->v_ralen;
				return 0;
			}
			lblkno = vp->v_maxra;
		} else {
			lblkno += 1;
		}
		bp = NULL;
	} else {
		/*
		 * if it isn't in the cache, then get a chunk from disk if
		 * sequential, otherwise just get the block.
		 */
		bp->b_flags |= B_READ;
		lblkno += 1;
		curproc->p_stats->p_ru.ru_inblock++;	/* XXX */
		vp->v_ralen = 0;
	}
	/*
	 * assume no read-ahead
	 */
	alreadyincore = 1;
	rablkno = lblkno;

	/*
	 * if we have been doing sequential I/O, then do some read-ahead
	 */
	if (seq) {
		/*
		 * bump ralen a bit...
		 */
		if ((vp->v_ralen + 1) < RA_MULTIPLE_SLOW * (MAXPHYS / size))
			++vp->v_ralen;
		/*
		 * this code makes sure that the stuff that we have read ahead
		 * is still in the cache.  If it isn't, we have been reading
		 * ahead too much, and we need to back off, otherwise we might
		 * try to read more.
		 */
		for (i = 0; i < vp->v_ralen; i++) {
			rablkno = lblkno + i;
			alreadyincore = (int) incore(vp, rablkno);
			if (!alreadyincore) {
				if (inmem(vp, rablkno)) {
					if (vp->v_maxra < rablkno)
						vp->v_maxra = rablkno + 1;
					continue;
				}
				if (rablkno < vp->v_maxra) {
					vp->v_maxra = rablkno;
					vp->v_ralen >>= RA_SHIFTDOWN;
					alreadyincore = 1;
				}
				break;
			} else if (vp->v_maxra < rablkno) {
				vp->v_maxra = rablkno + 1;
			}
		}
	}
	/*
	 * we now build the read-ahead buffer if it is desirable.
	 */
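	/*
	 * Note: VOP_BMAP also returns, in num_ra, the number of blocks that
	 * are contiguous on disk after rablkno; that run length is then
	 * clamped to the current read-ahead window (v_ralen) before
	 * cluster_rbuild() is asked to build a single large read.
	 */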
	rbp = NULL;
	if (!alreadyincore &&
	    (rablkno + 1) * size <= filesize &&
	    !(error = VOP_BMAP(vp, rablkno, NULL, &blkno, &num_ra, NULL)) &&
	    blkno != -1) {
		if (num_ra > vp->v_ralen)
			num_ra = vp->v_ralen;

		if (num_ra) {
			rbp = cluster_rbuild(vp, filesize, rablkno, blkno, size,
				num_ra + 1);
		} else {
			rbp = getblk(vp, rablkno, size, 0, 0);
			rbp->b_flags |= B_READ | B_ASYNC;
			rbp->b_blkno = blkno;
		}
	}

	/*
	 * handle the synchronous read
	 */
	if (bp) {
		if (bp->b_flags & (B_DONE | B_DELWRI))
			panic("cluster_read: DONE bp");
		else {
			vfs_busy_pages(bp, 0);
			error = VOP_STRATEGY(bp);
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			totreads++;
			totreadblocks += bp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	/*
	 * and if we have read-aheads, do them too
	 */
	if (rbp) {
		vp->v_maxra = rbp->b_lblkno + rbp->b_bcount / size;
		if (error || (rbp->b_flags & B_CACHE)) {
			rbp->b_flags &= ~(B_ASYNC | B_READ);
			brelse(rbp);
		} else {
			if ((rbp->b_flags & B_CLUSTER) == 0)
				vfs_busy_pages(rbp, 0);
			(void) VOP_STRATEGY(rbp);
			totreads++;
			totreadblocks += rbp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	if (bp && ((bp->b_flags & B_ASYNC) == 0))
		return (biowait(bp));
	return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
{
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_rbuild: size %ld != f_iosize %ld\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
	if (size * (lbn + run) > filesize)
		--run;

	tbp = getblk(vp, lbn, size, 0, 0);
	if (tbp->b_flags & B_CACHE)
		return tbp;

	tbp->b_blkno = blkno;
	tbp->b_flags |= B_ASYNC | B_READ;
	if (((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return tbp;

	bp = trypbuf();
	if (bp == NULL)
		return tbp;

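	/*
	 * Give the pbuf's data pointer the same offset within the page as
	 * the first buffer's data (the |= on a cast is an old GCC
	 * extension), so the cluster's KVA lines up with the component
	 * buffers once the pages are entered with pmap_qenter() below.
	 */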
	(vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
	bp->b_flags = B_ASYNC | B_READ | B_CALL | B_BUSY | B_CLUSTER | B_VMIO;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
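	/*
	 * Walk forward while the blocks stay contiguous on disk, are not
	 * already cached, and are VMIO-backed, collecting their buffers onto
	 * the cluster list and their pages into the pbuf.  Pages that are
	 * already fully valid are replaced by bogus_page so the device
	 * transfer cannot overwrite valid data; a page shared by adjacent
	 * buffers is only entered once.
	 */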
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) + size > MAXPHYS)
				break;

			if (incore(vp, lbn + i))
				break;
			tbp = getblk(vp, lbn + i, size, 0, 0);

			if ((tbp->b_flags & B_CACHE) ||
				(tbp->b_flags & B_VMIO) == 0) {
				brelse(tbp);
				break;
			}

			for (j = 0; j < tbp->b_npages; j++) {
				if (tbp->b_pages[j]->valid) {
					break;
				}
			}

			if (j != tbp->b_npages) {
				/*
				 * force buffer to be re-constituted later
				 */
				tbp->b_flags |= B_RELBUF;
				brelse(tbp);
				break;
			}

			tbp->b_flags |= B_READ | B_ASYNC;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			tbp, b_cluster.cluster_entry);
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			++m->busy;
			++m->object->paging_in_progress;
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) {
				m = bogus_page;
			}
			if ((bp->b_npages == 0) ||
				(bp->b_pages[bp->b_npages-1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
		}
		bp->b_bcount += tbp->b_bcount;
		bp->b_bufsize += tbp->b_bufsize;
	}
	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		(vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct buf *nbp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark I/O as done on them.
	 */
	for (tbp = bp->b_cluster.cluster_head.tqh_first;
		tbp; tbp = nbp) {
		nbp = tbp->b_cluster.cluster_entry.tqe_next;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		}
		biodone(tbp);
	}
	relpbuf(bp);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
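/*
 * The per-vnode fields used below track the write cluster being
 * accumulated: v_cstart is the logical block the current cluster starts at,
 * v_clen is the number of blocks beyond v_cstart that the cluster may grow
 * to (it is written out when lbn reaches v_cstart + v_clen), v_lastw is the
 * last logical block written, and v_lasta is the disk block that last write
 * was assigned to.
 */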
void
cluster_write(bp, filesize)
	struct buf *bp;
	u_quad_t filesize;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	vp = bp->b_vp;
	async = (vp->v_mount && (vp->v_mount->mnt_flag & MNT_ASYNC));
	lblocksize = vp->v_mount->mnt_stat.f_iosize;
	lbn = bp->b_lblkno;

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = MAXPHYS / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * has seeked to another point in the file since its
			 * last write, or we have reached our maximum cluster
			 * size, then push the previous cluster.  Otherwise
			 * try reallocating to make it sequential.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
#if 1
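			/*
			 * The #else branch below is the (4.4BSD-derived)
			 * path that tries VOP_REALLOCBLKS() to make the
			 * blocks contiguous instead of pushing the cluster;
			 * it is compiled out here, and doreallocblks
			 * defaults to 0 in any case.
			 */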
			if ((lbn + 1) * lblocksize != filesize ||
				lbn != vp->v_lastw + 1 ||
				vp->v_clen <= cursize) {
				if (!async)
					cluster_wbuild(vp, lblocksize,
						vp->v_cstart, cursize);
			}
#else
			if (!doreallocblks ||
			    (lbn + 1) * lblocksize != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async)
					cluster_wbuild(vp, lblocksize,
						vp->v_cstart, cursize);
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					cluster_wbuild(vp, lblocksize,
					    vp->v_cstart, cursize);
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
#endif
		}
		/*
		 * Consider beginning a cluster. If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((lbn + 1) * lblocksize != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out.
		 */
		bdwrite(bp);
		cluster_wbuild(vp, lblocksize, vp->v_cstart,
		    vp->v_clen + 1);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Gather up to len delayed-write buffers, starting at start_lbn, that are
 * contiguous on disk and issue them as a single clustered write; buffers
 * that cannot be clustered are simply written out individually.
 */
void
cluster_wbuild(vp, size, start_lbn, len)
	struct vnode *vp;
	long size;
	daddr_t start_lbn;
	int len;
{
	struct buf *bp, *tbp;
	int i, j, s;
	int dbsize = btodb(size);
	int origlen = len;

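	/*
	 * Scan forward from start_lbn for a delayed-write buffer that can
	 * anchor a cluster; anything that is missing, busy, invalid, not
	 * delayed-write, not B_VMIO/B_CLUSTEROK, or oddly sized is written
	 * (or skipped) on its own and the scan resumes at the next block.
	 */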
redo:
	if (len == 0)
		return;
	if (((tbp = incore(vp, start_lbn)) == NULL) ||
		((tbp->b_flags & (B_INVAL|B_BUSY|B_DELWRI)) != B_DELWRI)) {
		++start_lbn;
		--len;
		goto redo;
	}

	tbp = getblk(vp, start_lbn, size, 0, 0);
	if ((tbp->b_flags & B_DELWRI) == 0) {
		++start_lbn;
		--len;
		brelse(tbp);
		goto redo;
	}
	/*
	 * Extra memory in the buffer, punt on this buffer. XXX we could
	 * handle this in most cases, but we would have to push the extra
	 * memory down to after our max possible cluster size and then
	 * potentially pull it back up if the cluster was terminated
	 * prematurely--too much hassle.
	 */
	if (((tbp->b_flags & (B_VMIO|B_CLUSTEROK)) != (B_VMIO|B_CLUSTEROK)) ||
		(tbp->b_bcount != tbp->b_bufsize) ||
		len == 1) {
		bawrite(tbp);
		++start_lbn;
		--len;
		goto redo;
	}

	bp = trypbuf();
	if (bp == NULL) {
		bawrite(tbp);
		++start_lbn;
		--len;
		goto redo;
	}

	TAILQ_INIT(&bp->b_cluster.cluster_head);
	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	bp->b_blkno = tbp->b_blkno;
	bp->b_lblkno = tbp->b_lblkno;
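	/* As in cluster_rbuild(), carry tbp's intra-page data offset over. */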
	(vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
	bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER;
	bp->b_iodone = cluster_callback;
	pbgetvp(vp, bp);

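	/*
	 * Pull in the remaining delayed-write buffers while they stay
	 * contiguous on disk, clusterable, and within MAXPHYS; their pages
	 * are collected into the pbuf and each component buffer is marked
	 * async and moved to the clean list, to be completed through
	 * cluster_callback() when the single big write finishes.
	 */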
	for (i = 0; i < len; ++i, ++start_lbn) {
		if (i != 0) {
			s = splbio();
			if ((tbp = incore(vp, start_lbn)) == NULL) {
				splx(s);
				break;
			}

			if ((tbp->b_flags & (B_CLUSTEROK|B_INVAL|B_BUSY|B_DELWRI)) !=
			    (B_DELWRI|B_CLUSTEROK)) {
				splx(s);
				break;
			}

			if ((tbp->b_bcount != size) ||
				((bp->b_blkno + dbsize * i) != tbp->b_blkno) ||
				((tbp->b_npages + bp->b_npages) > (MAXPHYS / PAGE_SIZE))) {
				splx(s);
				break;
			}
			bremfree(tbp);
			tbp->b_flags |= B_BUSY;
			tbp->b_flags &= ~B_DONE;
			splx(s);
		}
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			++m->busy;
			++m->object->paging_in_progress;
			if ((bp->b_npages == 0) ||
				(bp->b_pages[bp->b_npages - 1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
		}
		bp->b_bcount += size;
		bp->b_bufsize += size;

		tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
		tbp->b_flags |= B_ASYNC;
		s = splbio();
		reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
		++tbp->b_vp->v_numoutput;
		splx(s);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			tbp, b_cluster.cluster_entry);
	}
	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		(vm_page_t *) bp->b_pages, bp->b_npages);
	bawrite(bp);

	len -= i;
	goto redo;
}

#if 0
/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED,
		    &buflist->bs_children[i]);
	buflist->bs_children[i] = last_bp;
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
#endif