xref: /freebsd/sys/kern/vfs_cluster.c (revision 8e6b01171e30297084bb0b4457c4183c2746aacc)
1 /*-
2  * Copyright (c) 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * Modifications/enhancements:
5  * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by the University of
18  *	California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
36  * $Id: vfs_cluster.c,v 1.21 1995/09/23 21:12:45 dyson Exp $
37  */
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/proc.h>
42 #include <sys/buf.h>
43 #include <sys/vnode.h>
44 #include <sys/mount.h>
45 #include <sys/malloc.h>
46 #include <sys/resourcevar.h>
47 #include <sys/vmmeter.h>
48 #include <miscfs/specfs/specdev.h>
49 #include <vm/vm.h>
50 #include <vm/vm_object.h>
51 #include <vm/vm_page.h>
52 
53 #ifdef DEBUG
54 #include <vm/vm.h>
55 #include <sys/sysctl.h>
56 int doreallocblks = 0;
57 struct ctldebug debug13 = {"doreallocblks", &doreallocblks};
58 
59 #else
60 /* XXX for cluster_write */
61 #define doreallocblks 0
62 #endif
63 
64 /*
65  * Local declarations
66  */
67 static struct buf *cluster_rbuild __P((struct vnode *, u_quad_t,
68     daddr_t, daddr_t, long, int));
69 struct cluster_save *cluster_collectbufs __P((struct vnode *, struct buf *));
70 
71 int totreads;
72 int totreadblocks;
73 extern vm_page_t bogus_page;
74 
75 #ifdef DIAGNOSTIC
76 /*
77  * Set to 1 if reads of block zero should cause readahead to be done.
78  * Set to 0 if a read of block zero should be treated as a non-sequential read.
79  *
80  * Setting to one assumes that most reads of block zero of files are due to
81  * sequential passes over the files (e.g. cat, sum) where additional blocks
82  * will soon be needed.  Setting to zero assumes that the majority are
83  * surgical strikes to get particular info (e.g. size, file) where readahead
84  * blocks will not be used and, in fact, push out other potentially useful
85  * blocks from the cache.  The former seems intuitive, but some quick tests
86  * showed that the latter performed better from a system-wide point of view.
87  */
88 	int doclusterraz = 0;
89 
90 #define ISSEQREAD(vp, blk) \
91 	(((blk) != 0 || doclusterraz) && \
92 	 ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
93 #else
94 #define ISSEQREAD(vp, blk) \
95 	(/* (blk) != 0 && */ ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
96 #endif
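/*
 * In either variant, a read of logical block blk is treated as sequential
 * when it is for v_lastr (the last logical block read on this vnode) or for
 * the block just after it.  The DIAGNOSTIC variant additionally treats a
 * read of block zero as non-sequential unless doclusterraz is set.
 */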
97 
98 /*
99  * allow for three entire read-aheads...  The system will
100  * adjust downwards rapidly if needed...
101  */
102 #define RA_MULTIPLE_FAST	2
103 #define RA_MULTIPLE_SLOW	3
104 #define RA_SHIFTDOWN	1	/* approx lg2(RA_MULTIPLE) */
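/*
 * v_ralen is the current read-ahead window in filesystem blocks.  It grows
 * one block at a time, up to RA_MULTIPLE_FAST * (MAXPHYS / size) on cache
 * hits inside the window and up to RA_MULTIPLE_SLOW * (MAXPHYS / size) when
 * read-ahead is actually issued, and is halved (>> RA_SHIFTDOWN) whenever
 * the access pattern stops looking sequential or previously read-ahead
 * blocks have been pushed out of the cache.
 */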
105 /*
106  * This replaces bread.  If this is a bread at the beginning of a file and
107  * lastr is 0, we assume this is the first read and we'll read up to two
108  * blocks if they are sequential.  After that, we'll do regular read ahead
109  * in clustered chunks.
110  * 	bp is the block requested.
111  *	rbp is the read-ahead block.
112  *	If either is NULL, then you don't have to do the I/O.
113  */
114 int
115 cluster_read(vp, filesize, lblkno, size, cred, bpp)
116 	struct vnode *vp;
117 	u_quad_t filesize;
118 	daddr_t lblkno;
119 	long size;
120 	struct ucred *cred;
121 	struct buf **bpp;
122 {
123 	struct buf *bp, *rbp;
124 	daddr_t blkno, rablkno, origlblkno;
125 	long flags;
126 	int error, num_ra, alreadyincore;
127 	int i;
128 	int seq;
129 
130 	error = 0;
131 	/*
132 	 * get the requested block
133 	 */
134 	origlblkno = lblkno;
135 	*bpp = bp = getblk(vp, lblkno, size, 0, 0);
136 	seq = ISSEQREAD(vp, lblkno);
137 	/*
138 	 * if it is in the cache, then check to see if the reads have been
139 	 * sequential.  If they have, then try some read-ahead, otherwise
140 	 * back-off on prospective read-aheads.
141 	 */
142 	if (bp->b_flags & B_CACHE) {
143 		if (!seq) {
144 			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
145 			vp->v_ralen >>= RA_SHIFTDOWN;
146 			return 0;
147 		} else if( vp->v_maxra > lblkno) {
148 			if ( (vp->v_maxra + (vp->v_ralen / RA_MULTIPLE_SLOW)) >= (lblkno + vp->v_ralen)) {
149 				if ((vp->v_ralen + 1) < RA_MULTIPLE_FAST*(MAXPHYS / size))
150 					++vp->v_ralen;
151 				return 0;
152 			}
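			/*
			 * The read-ahead done so far no longer covers enough
			 * of the window, so resume read-ahead at the first
			 * block not yet read (v_maxra).
			 */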
153 			lblkno = vp->v_maxra;
154 		} else {
155 			lblkno += 1;
156 		}
157 		bp = NULL;
158 	} else {
159 		/*
160 		 * if it isn't in the cache, then get a chunk from disk if
161 		 * sequential, otherwise just get the block.
162 		 */
163 		bp->b_flags |= B_READ;
164 		lblkno += 1;
165 		curproc->p_stats->p_ru.ru_inblock++;	/* XXX */
166 		vp->v_ralen = 0;
167 	}
168 	/*
169 	 * assume no read-ahead
170 	 */
171 	alreadyincore = 1;
172 	rablkno = lblkno;
173 
174 	/*
175 	 * if we have been doing sequential I/O, then do some read-ahead
176 	 */
177 	if (seq) {
178 
179 	/*
180 	 * bump ralen a bit...
181 	 */
182 		if ((vp->v_ralen + 1) < RA_MULTIPLE_SLOW*(MAXPHYS / size))
183 			++vp->v_ralen;
184 		/*
185 		 * this code makes sure that the stuff that we have read-ahead
186 		 * is still in the cache.  If it isn't, we have been reading
187 		 * ahead too much, and we need to back-off, otherwise we might
188 		 * try to read more.
189 		 */
190 		for (i = 0; i < vp->v_ralen; i++) {
191 			rablkno = lblkno + i;
192 			alreadyincore = (int) incore(vp, rablkno);
193 			if (!alreadyincore) {
194 				if (inmem(vp, rablkno)) {
195 					struct buf *bpt;
196 					if (vp->v_maxra < rablkno)
197 						vp->v_maxra = rablkno + 1;
198 					continue;
199 				}
200 				if (rablkno < vp->v_maxra) {
201 					vp->v_maxra = rablkno;
202 					vp->v_ralen >>= RA_SHIFTDOWN;
203 					alreadyincore = 1;
204 				}
205 				break;
206 			} else if (vp->v_maxra < rablkno) {
207 				vp->v_maxra = rablkno + 1;
208 			}
209 		}
210 	}
211 	/*
212 	 * we now build the read-ahead buffer if it is desirable.
213 	 */
214 	rbp = NULL;
215 	if (!alreadyincore &&
216 	    (rablkno + 1) * size <= filesize &&
217 	    !(error = VOP_BMAP(vp, rablkno, NULL, &blkno, &num_ra, NULL)) &&
218 	    blkno != -1) {
219 		if (num_ra > vp->v_ralen)
220 			num_ra = vp->v_ralen;
221 
222 		if (num_ra) {
223 			rbp = cluster_rbuild(vp, filesize, rablkno, blkno, size,
224 				num_ra + 1);
225 		} else {
226 			rbp = getblk(vp, rablkno, size, 0, 0);
227 			rbp->b_flags |= B_READ | B_ASYNC;
228 			rbp->b_blkno = blkno;
229 		}
230 	}
231 
232 	/*
233 	 * handle the synchronous read
234 	 */
235 	if (bp) {
236 		if (bp->b_flags & (B_DONE | B_DELWRI))
237 			panic("cluster_read: DONE bp");
238 		else {
239 			vfs_busy_pages(bp, 0);
240 			error = VOP_STRATEGY(bp);
241 			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
242 			totreads++;
243 			totreadblocks += bp->b_bcount / size;
244 			curproc->p_stats->p_ru.ru_inblock++;
245 		}
246 	}
247 	/*
248 	 * and if we have read-aheads, do them too
249 	 */
250 	if (rbp) {
251 		vp->v_maxra = rbp->b_lblkno + rbp->b_bcount / size;
252 		if (error || (rbp->b_flags & B_CACHE)) {
253 			rbp->b_flags &= ~(B_ASYNC | B_READ);
254 			brelse(rbp);
255 		} else {
256 			if ((rbp->b_flags & B_CLUSTER) == 0)
257 				vfs_busy_pages(rbp, 0);
258 			(void) VOP_STRATEGY(rbp);
259 			totreads++;
260 			totreadblocks += rbp->b_bcount / size;
261 			curproc->p_stats->p_ru.ru_inblock++;
262 		}
263 	}
264 	if (bp && ((bp->b_flags & B_ASYNC) == 0))
265 		return (biowait(bp));
266 	return (error);
267 }
268 
269 /*
270  * If blocks are contiguous on disk, use this to provide clustered
271  * read ahead.  We will read as many blocks as possible sequentially
272  * and then parcel them up into logical blocks in the buffer hash table.
273  */
274 static struct buf *
275 cluster_rbuild(vp, filesize, lbn, blkno, size, run)
276 	struct vnode *vp;
277 	u_quad_t filesize;
278 	daddr_t lbn;
279 	daddr_t blkno;
280 	long size;
281 	int run;
282 {
283 	struct cluster_save *b_save;
284 	struct buf *bp, *tbp;
285 	daddr_t bn;
286 	int i, inc, j;
287 
288 #ifdef DIAGNOSTIC
289 	if (size != vp->v_mount->mnt_stat.f_iosize)
290 		panic("cluster_rbuild: size %d != f_iosize %d\n",
291 		    size, vp->v_mount->mnt_stat.f_iosize);
292 #endif
293 	if (size * (lbn + run) > filesize)
294 		--run;
295 
296 	tbp = getblk(vp, lbn, size, 0, 0);
297 	if (tbp->b_flags & B_CACHE)
298 		return tbp;
299 
300 	tbp->b_blkno = blkno;
301 	tbp->b_flags |= B_ASYNC | B_READ;
302 	if( ((tbp->b_flags & B_VMIO) == 0) || (run <= 1) )
303 		return tbp;
304 
305 	bp = trypbuf();
306 	if (bp == 0)
307 		return tbp;
308 
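	/*
	 * Carry the sub-page offset of the first component buffer's data
	 * over into the pbuf's data pointer, so that the KVA mapping set up
	 * by pmap_qenter() below lines up with the component buffers.
	 */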
309 	(vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
310 	bp->b_flags = B_ASYNC | B_READ | B_CALL | B_BUSY | B_CLUSTER | B_VMIO;
311 	bp->b_iodone = cluster_callback;
312 	bp->b_blkno = blkno;
313 	bp->b_lblkno = lbn;
314 	pbgetvp(vp, bp);
315 
316 	b_save = malloc(sizeof(struct buf *) * run +
317 		sizeof(struct cluster_save), M_SEGMENT, M_WAITOK);
318 	b_save->bs_nchildren = 0;
319 	b_save->bs_children = (struct buf **) (b_save + 1);
320 	bp->b_saveaddr = b_save;
321 
322 	bp->b_bcount = 0;
323 	bp->b_bufsize = 0;
324 	bp->b_npages = 0;
325 
326 	inc = btodb(size);
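	/*
	 * Walk forward one logical block per iteration; bn advances by the
	 * number of device blocks per filesystem block.  Any condition that
	 * breaks contiguity (cluster would exceed MAXPHYS, block already in
	 * core or cached, not VMIO, partially valid pages, or a
	 * non-contiguous disk address) terminates the cluster early.
	 */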
327 	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
328 		if (i != 0) {
329 			if ((bp->b_npages * PAGE_SIZE) + size > MAXPHYS)
330 				break;
331 
332 			if (incore(vp, lbn + i))
333 				break;
334 			tbp = getblk(vp, lbn + i, size, 0, 0);
335 
336 			if ((tbp->b_flags & B_CACHE) ||
337 				(tbp->b_flags & B_VMIO) == 0) {
338 				brelse(tbp);
339 				break;
340 			}
341 
342 			for (j=0;j<tbp->b_npages;j++) {
343 				if (tbp->b_pages[j]->valid) {
344 					break;
345 				}
346 			}
347 
348 			if (j != tbp->b_npages) {
349 				/*
350 				 * force buffer to be re-constituted later
351 				 */
352 				tbp->b_flags |= B_RELBUF;
353 				brelse(tbp);
354 				break;
355 			}
356 
357 			tbp->b_flags |= B_READ | B_ASYNC;
358 			if( tbp->b_blkno == tbp->b_lblkno) {
359 				tbp->b_blkno = bn;
360 			} else if (tbp->b_blkno != bn) {
361 				brelse(tbp);
362 				break;
363 			}
364 		}
365 		++b_save->bs_nchildren;
366 		b_save->bs_children[i] = tbp;
367 		for (j = 0; j < tbp->b_npages; j += 1) {
368 			vm_page_t m;
369 			m = tbp->b_pages[j];
370 			++m->busy;
371 			++m->object->paging_in_progress;
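			/*
			 * A page that is already fully valid is entered into
			 * the cluster's page list as bogus_page, so the
			 * device transfer cannot overwrite its up-to-date
			 * contents.
			 */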
372 			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) {
373 				m = bogus_page;
374 			}
375 			if ((bp->b_npages == 0) ||
376 				(bp->b_bufsize & PAGE_MASK) == 0) {
377 				bp->b_pages[bp->b_npages] = m;
378 				bp->b_npages++;
379 			} else {
380 				if ( tbp->b_npages > 1) {
381 					panic("cluster_rbuild: page unaligned filesystems not supported");
382 				}
383 			}
384 		}
385 		bp->b_bcount += tbp->b_bcount;
386 		bp->b_bufsize += tbp->b_bufsize;
387 	}
388 	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
389 		(vm_page_t *)bp->b_pages, bp->b_npages);
390 	return (bp);
391 }
392 
393 /*
394  * Cleanup after a clustered read or write.
395  * The component buffers share their pages with the cluster buffer, so
396  * nothing needs to be copied back; all that remains is to unmap the
397  * cluster's KVA, propagate any error, and complete I/O on each component.
398  */
399 void
400 cluster_callback(bp)
401 	struct buf *bp;
402 {
403 	struct cluster_save *b_save;
404 	struct buf **bpp, *tbp;
405 	caddr_t cp;
406 	int error = 0;
407 
408 	/*
409 	 * Must propagate errors to all the components.
410 	 */
411 	if (bp->b_flags & B_ERROR)
412 		error = bp->b_error;
413 
414 	b_save = (struct cluster_save *) (bp->b_saveaddr);
415 	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
416 	/*
417 	 * Propagate any error to the component buffers and mark I/O as
418 	 * done on each of them.
419 	 */
420 	for (bpp = b_save->bs_children; b_save->bs_nchildren--; ++bpp) {
421 		tbp = *bpp;
422 		if (error) {
423 			tbp->b_flags |= B_ERROR;
424 			tbp->b_error = error;
425 		}
426 		biodone(tbp);
427 	}
428 	free(b_save, M_SEGMENT);
429 	relpbuf(bp);
430 }
431 
432 /*
433  * Do clustered write for FFS.
434  *
435  * Four cases:
436  *	1. Write is not sequential (write asynchronously)
437  *	Write is sequential:
438  *	2.	beginning of cluster - begin cluster
439  *	3.	middle of a cluster - add to cluster
440  *	4.	end of a cluster - asynchronously write cluster
441  */
442 void
443 cluster_write(bp, filesize)
444 	struct buf *bp;
445 	u_quad_t filesize;
446 {
447 	struct vnode *vp;
448 	daddr_t lbn;
449 	int maxclen, cursize;
450 	int lblocksize;
451 
452 	vp = bp->b_vp;
453 	lblocksize = vp->v_mount->mnt_stat.f_iosize;
454 	lbn = bp->b_lblkno;
455 
456 	/* Initialize vnode to beginning of file. */
457 	if (lbn == 0)
458 		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
459 
460 	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
461 	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
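		/*
		 * Either no cluster is in progress, this write is not for the
		 * next logical block, or it is not physically contiguous with
		 * the previous block: finish off any cluster in progress and
		 * consider starting a new one.
		 */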
462 		maxclen = MAXPHYS / lblocksize - 1;
463 		if (vp->v_clen != 0) {
464 			/*
465 			 * Next block is not sequential.
466 			 *
467 			 * If we are not writing at end of file, the process
468 			 * seeked to another point in the file since its last
469 			 * write, or we have reached our maximum cluster size,
470 			 * then push the previous cluster. Otherwise try
471 			 * reallocating to make it sequential.
472 			 */
473 			cursize = vp->v_lastw - vp->v_cstart + 1;
474 			if (!doreallocblks ||
475 			    (lbn + 1) * lblocksize != filesize ||
476 			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
477 				cluster_wbuild(vp, NULL, lblocksize,
478 				    vp->v_cstart, cursize, lbn);
479 			} else {
480 				struct buf **bpp, **endbp;
481 				struct cluster_save *buflist;
482 
483 				buflist = cluster_collectbufs(vp, bp);
484 				endbp = &buflist->bs_children
485 				    [buflist->bs_nchildren - 1];
486 				if (VOP_REALLOCBLKS(vp, buflist)) {
487 					/*
488 					 * Failed, push the previous cluster.
489 					 */
490 					for (bpp = buflist->bs_children;
491 					     bpp < endbp; bpp++)
492 						brelse(*bpp);
493 					free(buflist, M_SEGMENT);
494 					cluster_wbuild(vp, NULL, lblocksize,
495 					    vp->v_cstart, cursize, lbn);
496 				} else {
497 					/*
498 					 * Succeeded, keep building cluster.
499 					 */
500 					for (bpp = buflist->bs_children;
501 					     bpp <= endbp; bpp++)
502 						bdwrite(*bpp);
503 					free(buflist, M_SEGMENT);
504 					vp->v_lastw = lbn;
505 					vp->v_lasta = bp->b_blkno;
506 					return;
507 				}
508 			}
509 		}
510 		/*
511 		 * Consider beginning a cluster. If at end of file, make
512 		 * cluster as large as possible, otherwise find size of
513 		 * existing cluster.
514 		 */
515 		if ((lbn + 1) * lblocksize != filesize &&
516 		    (bp->b_blkno == bp->b_lblkno) &&
517 		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
518 		     bp->b_blkno == -1)) {
519 			bawrite(bp);
520 			vp->v_clen = 0;
521 			vp->v_lasta = bp->b_blkno;
522 			vp->v_cstart = lbn + 1;
523 			vp->v_lastw = lbn;
524 			return;
525 		}
526 		vp->v_clen = maxclen;
527 		if (maxclen == 0) {	/* I/O not contiguous */
528 			vp->v_cstart = lbn + 1;
529 			bawrite(bp);
530 		} else {	/* Wait for rest of cluster */
531 			vp->v_cstart = lbn;
532 			bdwrite(bp);
533 		}
534 	} else if (lbn == vp->v_cstart + vp->v_clen) {
535 		/*
536 		 * At end of cluster, write it out.
537 		 */
538 		cluster_wbuild(vp, bp, bp->b_bcount, vp->v_cstart,
539 		    vp->v_clen + 1, lbn);
540 		vp->v_clen = 0;
541 		vp->v_cstart = lbn + 1;
542 	} else
543 		/*
544 		 * In the middle of a cluster, so just delay the I/O for now.
545 		 */
546 		bdwrite(bp);
547 	vp->v_lastw = lbn;
548 	vp->v_lasta = bp->b_blkno;
549 }
550 
551 
552 /*
553  * This is an awful lot like cluster_rbuild...wish they could be combined.
554  * The last lbn argument is the current block on which I/O is being
555  * performed.  Check that it does not fall in the middle of the cluster
556  * being written (if last_bp == NULL), so that block is not written twice.
557  */
558 void
559 cluster_wbuild(vp, last_bp, size, start_lbn, len, lbn)
560 	struct vnode *vp;
561 	struct buf *last_bp;
562 	long size;
563 	daddr_t start_lbn;
564 	int len;
565 	daddr_t lbn;
566 {
567 	struct cluster_save *b_save;
568 	struct buf *bp, *tbp, *pb;
569 	caddr_t cp;
570 	int i, j, s;
571 
572 #ifdef DIAGNOSTIC
573 	if (size != vp->v_mount->mnt_stat.f_iosize)
574 		panic("cluster_wbuild: size %d != f_iosize %d\n",
575 		    size, vp->v_mount->mnt_stat.f_iosize);
576 #endif
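	/*
	 * Starting at start_lbn, gather up to len delayed-write buffers into
	 * a single cluster pbuf and issue it with bawrite().  Buffers that
	 * are not in core, are busy, or are the block currently being
	 * written (lbn) are skipped; if the scan stops before len blocks are
	 * consumed, loop back to redo and try to cluster the remainder.
	 */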
577 redo:
578 	if( (lbn != -1) || (last_bp == 0)) {
579 		while ((!(tbp = incore(vp, start_lbn)) || (tbp->b_flags & B_BUSY)
580 			|| (start_lbn == lbn)) && len) {
581 			++start_lbn;
582 			--len;
583 		}
584 
585 		pb = trypbuf();
586 		/* Get more memory for current buffer */
587 		if (len <= 1 || pb == NULL) {
588 			if (pb != NULL)
589 				relpbuf(pb);
590 			if (last_bp) {
591 				bawrite(last_bp);
592 			} else if (len) {
593 				bp = getblk(vp, start_lbn, size, 0, 0);
594 				bawrite(bp);
595 			}
596 			return;
597 		}
598 		tbp = getblk(vp, start_lbn, size, 0, 0);
599 	} else {
600 		tbp = last_bp;
601 		if( tbp->b_flags & B_BUSY) {
602 			printf("vfs_cluster: warning: buffer already busy\n");
603 		}
604 		tbp->b_flags |= B_BUSY;
605 		last_bp = 0;
606 		pb = trypbuf();
607 		if (pb == NULL) {
608 			bawrite(tbp);
609 			return;
610 		}
611 	}
612 
613 	if (!(tbp->b_flags & B_DELWRI)) {
614 		relpbuf(pb);
615 		++start_lbn;
616 		--len;
617 		brelse(tbp);
618 		goto redo;
619 	}
620 	/*
621 	 * Extra memory in the buffer, punt on this buffer. XXX we could
622 	 * handle this in most cases, but we would have to push the extra
623 	 * memory down to after our max possible cluster size and then
624 	 * potentially pull it back up if the cluster was terminated
625 	 * prematurely--too much hassle.
626 	 */
627 	if (((tbp->b_flags & B_VMIO) == 0) ||
628 		(tbp->b_bcount != tbp->b_bufsize)) {
629 		relpbuf(pb);
630 		++start_lbn;
631 		--len;
632 		bawrite(tbp);
633 		goto redo;
634 	}
635 	bp = pb;
636 	b_save = malloc(sizeof(struct buf *) * (len + 1) + sizeof(struct cluster_save),
637 	    M_SEGMENT, M_WAITOK);
638 	b_save->bs_nchildren = 0;
639 	b_save->bs_children = (struct buf **) (b_save + 1);
640 	bp->b_saveaddr = b_save;
641 	bp->b_bcount = 0;
642 	bp->b_bufsize = 0;
643 	bp->b_npages = 0;
644 
645 	if (tbp->b_flags & B_VMIO)
646 		bp->b_flags |= B_VMIO;
647 
648 	bp->b_blkno = tbp->b_blkno;
649 	bp->b_lblkno = tbp->b_lblkno;
650 	(vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
651 	bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER;
652 	bp->b_iodone = cluster_callback;
653 	pbgetvp(vp, bp);
654 
655 	for (i = 0; i < len; ++i, ++start_lbn) {
656 		if (i != 0) {
657 			/*
658 			 * Block is not in core or the non-sequential block
659 			 * ending our cluster was part of the cluster (in
660 			 * which case we don't want to write it twice).
661 			 */
662 			if (!(tbp = incore(vp, start_lbn)) ||
663 			    (last_bp == NULL && start_lbn == lbn))
664 				break;
665 
666 			if ((tbp->b_flags & (B_INVAL | B_CLUSTEROK)) != B_CLUSTEROK)
667 				break;
668 
669 			if ((tbp->b_npages + bp->b_npages) > (MAXPHYS / PAGE_SIZE))
670 				break;
671 
672 			if ( (tbp->b_blkno != tbp->b_lblkno) &&
673 				((bp->b_blkno + btodb(size) * i) != tbp->b_blkno))
674 				break;
675 
676 			/*
677 			 * Get the desired block buffer (unless it is the
678 			 * final sequential block whose buffer was passed in
679 	 * explicitly as last_bp).
680 			 */
681 			if (last_bp == NULL || start_lbn != lbn) {
682 				if( tbp->b_flags & B_BUSY)
683 					break;
684 				tbp = getblk(vp, start_lbn, size, 0, 0);
685 				if (!(tbp->b_flags & B_DELWRI) ||
686 				    ((tbp->b_flags & B_VMIO) != (bp->b_flags & B_VMIO))) {
687 					brelse(tbp);
688 					break;
689 				}
690 			} else
691 				tbp = last_bp;
692 		}
693 		for (j = 0; j < tbp->b_npages; j += 1) {
694 			vm_page_t m;
695 			m = tbp->b_pages[j];
696 			++m->busy;
697 			++m->object->paging_in_progress;
698 			if ((bp->b_npages == 0) ||
699 				(bp->b_pages[bp->b_npages - 1] != m)) {
700 				bp->b_pages[bp->b_npages] = m;
701 				bp->b_npages++;
702 			}
703 		}
704 		bp->b_bcount += size;
705 		bp->b_bufsize += size;
706 
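		/*
		 * The component's data will be written as part of the
		 * cluster, so clear its delayed-write state, move it to the
		 * vnode's clean list, and account for it as output in
		 * progress; it is completed via biodone() from
		 * cluster_callback().
		 */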
707 		tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
708 		tbp->b_flags |= B_ASYNC;
709 		s = splbio();
710 		reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
711 		++tbp->b_vp->v_numoutput;
712 		splx(s);
713 		b_save->bs_children[i] = tbp;
714 	}
715 	b_save->bs_nchildren = i;
716 	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
717 		(vm_page_t *) bp->b_pages, bp->b_npages);
718 	bawrite(bp);
719 
720 	if (i < len) {
721 		len -= i;
722 		goto redo;
723 	}
724 }
725 
726 /*
727  * Collect together all the buffers in a cluster.
728  * Plus add one additional buffer.
729  */
730 struct cluster_save *
731 cluster_collectbufs(vp, last_bp)
732 	struct vnode *vp;
733 	struct buf *last_bp;
734 {
735 	struct cluster_save *buflist;
736 	daddr_t lbn;
737 	int i, len;
738 
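	/*
	 * Bring in every block from v_cstart through v_lastw and append the
	 * buffer for the current block (last_bp); cluster_write() hands the
	 * resulting list to VOP_REALLOCBLKS().
	 */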
739 	len = vp->v_lastw - vp->v_cstart + 1;
740 	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
741 	    M_SEGMENT, M_WAITOK);
742 	buflist->bs_nchildren = 0;
743 	buflist->bs_children = (struct buf **) (buflist + 1);
744 	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
745 		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED,
746 		    &buflist->bs_children[i]);
747 	buflist->bs_children[i] = last_bp;
748 	buflist->bs_nchildren = i + 1;
749 	return (buflist);
750 }
751