/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $Id: vfs_cluster.c,v 1.14 1995/03/19 13:28:54 davidg Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>

#ifdef DEBUG
#include <sys/sysctl.h>
int doreallocblks = 0;
struct ctldebug debug13 = {"doreallocblks", &doreallocblks};

#else
/* XXX for cluster_write */
#define doreallocblks 0
#endif

/*
 * Local declarations
 */
struct buf *cluster_rbuild __P((struct vnode *, u_quad_t, struct buf *,
    daddr_t, daddr_t, long, int, long));
struct cluster_save *cluster_collectbufs __P((struct vnode *, struct buf *));

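/*
 * Statistics: total clustered reads issued, and total blocks read
 * through them.
 */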
int totreads;
int totreadblocks;

#ifdef DIAGNOSTIC
/*
 * Set to 1 if reads of block zero should cause readahead to be done.
 * Set to 0 treats a read of block zero as a non-sequential read.
 *
 * Setting to one assumes that most reads of block zero of files are due to
 * sequential passes over the files (e.g. cat, sum) where additional blocks
 * will soon be needed.  Setting to zero assumes that the majority are
 * surgical strikes to get particular info (e.g. size, file) where readahead
 * blocks will not be used and, in fact, push out other potentially useful
 * blocks from the cache.  The former seems intuitive, but some quick tests
 * showed that the latter performed better from a system-wide point of view.
 */
int doclusterraz = 0;

#define ISSEQREAD(vp, blk) \
	(((blk) != 0 || doclusterraz) && \
	 ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#else
#define ISSEQREAD(vp, blk) \
	(/* (blk) != 0 && */ ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#endif
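
/*
 * In either build, a read of logical block blk counts as sequential when
 * it lands on the last block read (v_lastr) or on the block just after
 * it.  v_lastr itself is maintained outside this file, by the
 * per-filesystem read routines.
 */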

/*
 * This replaces bread.  If this is a bread at the beginning of a file and
 * lastr is 0, we assume this is the first read and we'll read up to two
 * blocks if they are sequential.  After that, we'll do regular read ahead
 * in clustered chunks.
 *	Internally, bp is the buffer for the requested block and rbp the
 *	buffer for the read-ahead block; when either is NULL, no I/O is
 *	needed for that one.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp, *rbp;
	daddr_t blkno, rablkno, origlblkno;
	int error, num_ra, alreadyincore;

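	/*
	 * Read-ahead bookkeeping kept in the vnode: v_ralen is the current
	 * read-ahead window in blocks (grown while sequential access
	 * continues, halved when it breaks down) and v_maxra tracks the
	 * furthest block brought in by read-ahead so far.
	 */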
	origlblkno = lblkno;
	error = 0;
	/*
	 * get the requested block
	 */
	*bpp = bp = getblk(vp, lblkno, size, 0, 0);
	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!ISSEQREAD(vp, origlblkno)) {
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			vp->v_ralen >>= 1;
			return 0;
		} else if (vp->v_maxra >= origlblkno) {
			if ((vp->v_ralen + 1) < (MAXPHYS / size))
				vp->v_ralen++;
			if (vp->v_maxra >= (origlblkno + vp->v_ralen))
				return 0;
			lblkno = vp->v_maxra;
		}
		bp = NULL;
	} else {
		/*
		 * if it isn't in the cache, then get a chunk from disk if
		 * sequential, otherwise just get the block.
		 */
		bp->b_flags |= B_READ;
		lblkno += 1;
		curproc->p_stats->p_ru.ru_inblock++;	/* XXX */
	}
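	/*
	 * At this point bp is NULL if the requested block was already
	 * cached (no synchronous I/O is needed) and lblkno has been
	 * advanced to the first candidate read-ahead block.
	 */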
	/*
	 * if ralen is "none", then try a little
	 */
	if (vp->v_ralen == 0)
		vp->v_ralen = 1;
	/*
	 * assume no read-ahead
	 */
	alreadyincore = 1;
	rablkno = lblkno;

	/*
	 * if we have been doing sequential I/O, then do some read-ahead
	 */
	if (ISSEQREAD(vp, origlblkno)) {
		int i;

		/*
		 * this code makes sure that the stuff that we have read-ahead
		 * is still in the cache.  If it isn't, we have been reading
		 * ahead too much, and we need to back-off, otherwise we might
		 * try to read more.
		 */
		for (i = 0; i < vp->v_ralen; i++) {
			rablkno = lblkno + i;
			alreadyincore = (int) incore(vp, rablkno);
			if (!alreadyincore) {
				if (rablkno < vp->v_maxra) {
					vp->v_maxra = rablkno;
					vp->v_ralen >>= 1;
					alreadyincore = 1;
				} else {
					if (inmem(vp, rablkno)) {
						if (vp->v_maxra < rablkno)
							vp->v_maxra = rablkno + 1;
						continue;
					}
					if ((vp->v_ralen + 1) < MAXPHYS / size)
						vp->v_ralen++;
				}
				break;
			} else if (vp->v_maxra < rablkno) {
				vp->v_maxra = rablkno + 1;
			}
		}
	}
	/*
	 * we now build the read-ahead buffer if it is desirable.
	 */
	rbp = NULL;
	if (!alreadyincore &&
	    (rablkno + 1) * size <= filesize &&
	    !(error = VOP_BMAP(vp, rablkno, NULL, &blkno, &num_ra)) &&
	    blkno != -1) {
		if ((vp->v_ralen + 1) < MAXPHYS / size)
			vp->v_ralen++;
		if (num_ra > vp->v_ralen)
			num_ra = vp->v_ralen;

		if (num_ra) {
			rbp = cluster_rbuild(vp, filesize,
			    NULL, rablkno, blkno, size, num_ra, B_READ | B_ASYNC);
		} else {
			rbp = getblk(vp, rablkno, size, 0, 0);
			rbp->b_flags |= B_READ | B_ASYNC;
			rbp->b_blkno = blkno;
		}
	}
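	/*
	 * rbp, if non-NULL, now describes the entire read-ahead: either a
	 * cluster pseudo-buffer covering up to num_ra + 1 blocks or a
	 * single asynchronous block read.
	 */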

	/*
	 * if the synchronous read is a cluster, handle it, otherwise do a
	 * simple, non-clustered read.
	 */
	if (bp) {
		if (bp->b_flags & (B_DONE | B_DELWRI))
			panic("cluster_read: DONE bp");
		else {
			vfs_busy_pages(bp, 0);
			error = VOP_STRATEGY(bp);
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			totreads++;
			totreadblocks += bp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	/*
	 * and if we have read-aheads, do them too
	 */
	if (rbp) {
		vp->v_maxra = rbp->b_lblkno + rbp->b_bcount / size;
		if (error || (rbp->b_flags & B_CACHE)) {
			rbp->b_flags &= ~(B_ASYNC | B_READ);
			brelse(rbp);
		} else {
			vfs_busy_pages(rbp, 0);
			(void) VOP_STRATEGY(rbp);
			totreads++;
			totreadblocks += rbp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	if (bp && ((bp->b_flags & B_ASYNC) == 0))
		return (biowait(bp));
	return (error);
}
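
/*
 * Sketch of a typical call site (illustrative only; modeled on the
 * 4.4BSD ffs_read() path, where ip->i_size and NOCRED stand in for the
 * caller's file size and credentials):
 *
 *	error = cluster_read(vp, ip->i_size, lbn, size, NOCRED, &bp);
 *
 * On success bp holds the requested block; any read-ahead initiated
 * here completes asynchronously behind it.
 */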

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
struct buf *
cluster_rbuild(vp, filesize, bp, lbn, blkno, size, run, flags)
	struct vnode *vp;
	u_quad_t filesize;
	struct buf *bp;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	long flags;
{
	struct cluster_save *b_save;
	struct buf *tbp;
	daddr_t bn;
	int i, inc, j;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_rbuild: size %ld != f_iosize %ld\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
	if (size * (lbn + run + 1) > filesize)
		--run;
	if (run == 0) {
		if (!bp) {
			bp = getblk(vp, lbn, size, 0, 0);
			bp->b_blkno = blkno;
			bp->b_flags |= flags;
		}
		return (bp);
	}
	tbp = bp;
	if (!tbp) {
		tbp = getblk(vp, lbn, size, 0, 0);
	}
	if (tbp->b_flags & B_CACHE) {
		return (tbp);
	} else if (bp == NULL) {
		tbp->b_flags |= B_ASYNC;
	}
	bp = getpbuf();
	bp->b_flags = flags | B_CALL | B_BUSY | B_CLUSTER;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	pbgetvp(vp, bp);

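	/*
	 * A single allocation holds the cluster_save header followed
	 * immediately by the array of child buffer pointers.
	 */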
	b_save = malloc(sizeof(struct buf *) * (run + 1) + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **) (b_save + 1);
	bp->b_saveaddr = b_save;

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	if (tbp->b_flags & B_VMIO)
		bp->b_flags |= B_VMIO;

	inc = btodb(size);
	for (bn = blkno, i = 0; i <= run; ++i, bn += inc) {
		if (i != 0) {
			tbp = getblk(vp, lbn + i, size, 0, 0);
			if ((tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) != (bp->b_flags & B_VMIO)) {
				brelse(tbp);
				break;
			}
			tbp->b_blkno = bn;
			tbp->b_flags |= flags | B_READ | B_ASYNC;
		} else {
			tbp->b_flags |= flags | B_READ;
		}
		++b_save->bs_nchildren;
		b_save->bs_children[i] = tbp;
		for (j = 0; j < tbp->b_npages; j += 1) {
			bp->b_pages[j + bp->b_npages] = tbp->b_pages[j];
		}
		bp->b_npages += tbp->b_npages;
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}
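	/*
	 * Map the gathered pages into the pseudo-buffer's contiguous
	 * kernel address space so the driver sees one linear transfer;
	 * cluster_callback() tears this mapping down again.
	 */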
	pmap_qenter((vm_offset_t) bp->b_data, (vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * Propagate any I/O error to the component buffers, mark them done,
 * and release the cluster's page mapping and its pseudo-buffer.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct cluster_save *b_save;
	struct buf **bpp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	b_save = (struct cluster_save *) (bp->b_saveaddr);
	pmap_qremove((vm_offset_t) bp->b_data, bp->b_npages);
	/*
	 * The component buffers share the cluster's pages, so no data
	 * needs to be moved; just flag any error and mark I/O done on
	 * each of them.
	 */
	for (bpp = b_save->bs_children; b_save->bs_nchildren--; ++bpp) {
		tbp = *bpp;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		}
		biodone(tbp);
	}
	free(b_save, M_SEGMENT);
	relpbuf(bp);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize)
	struct buf *bp;
	u_quad_t filesize;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;

	vp = bp->b_vp;
	lblocksize = vp->v_mount->mnt_stat.f_iosize;
	lbn = bp->b_lblkno;

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = MAXPHYS / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * The next block is not sequential: we are not
			 * writing at end of file, the process has seeked to
			 * another point in the file since its last write, or
			 * we have reached our maximum cluster size.  Push
			 * out the previous cluster.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			cluster_wbuild(vp, NULL, lblocksize,
			    vp->v_cstart, cursize, lbn);
		}
		/*
		 * Consider beginning a cluster. If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((lbn + 1) * lblocksize != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen) ||
			bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out.
		 */
		cluster_wbuild(vp, bp, bp->b_bcount, vp->v_cstart,
		    vp->v_clen + 1, lbn);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}
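
/*
 * Illustration of the resulting behavior (a sketch: assumes 8K blocks
 * and a 64K MAXPHYS, so maxclen is 7): sequential, contiguous writes to
 * logical blocks 0..6 are delayed with bdwrite(); when block 7
 * (v_cstart + v_clen) arrives, cluster_wbuild() pushes all eight blocks
 * as a single asynchronous transfer.  A seek in between pushes the
 * partial cluster immediately.
 */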


/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the cluster (if last_bp == NULL).
 */
void
cluster_wbuild(vp, last_bp, size, start_lbn, len, lbn)
	struct vnode *vp;
	struct buf *last_bp;
	long size;
	daddr_t start_lbn;
	int len;
	daddr_t lbn;
{
	struct cluster_save *b_save;
	struct buf *bp, *tbp, *pb;
	int i, j, s;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_wbuild: size %ld != f_iosize %ld\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
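	/*
	 * Each pass below assembles at most one cluster beginning at
	 * start_lbn.  A block that cannot be clustered (not in core, busy,
	 * clean, or the in-flight block lbn itself) ends the pass, and any
	 * remainder of the range is retried from "redo".
	 */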
redo:
	if ((lbn != -1) || (last_bp == NULL)) {
		while ((!(tbp = incore(vp, start_lbn)) || (tbp->b_flags & B_BUSY)
			|| (start_lbn == lbn)) && len) {
			++start_lbn;
			--len;
		}

		pb = trypbuf();
		/*
		 * If the range is too short to cluster, or no pseudo-buffer
		 * is available, just write the block out directly.
		 */
		if (len <= 1 || pb == NULL) {
			if (pb != NULL)
				relpbuf(pb);
			if (last_bp) {
				bawrite(last_bp);
			} else if (len) {
				bp = getblk(vp, start_lbn, size, 0, 0);
				bawrite(bp);
			}
			return;
		}
		tbp = getblk(vp, start_lbn, size, 0, 0);
	} else {
		tbp = last_bp;
		if (tbp->b_flags & B_BUSY) {
			printf("vfs_cluster: warning: buffer already busy\n");
		}
		tbp->b_flags |= B_BUSY;
		last_bp = NULL;
		pb = trypbuf();
		if (pb == NULL) {
			bawrite(tbp);
			return;
		}
	}

	if (!(tbp->b_flags & B_DELWRI)) {
		relpbuf(pb);
		++start_lbn;
		--len;
		brelse(tbp);
		goto redo;
	}
	/*
	 * Extra memory in the buffer, punt on this buffer. XXX we could
	 * handle this in most cases, but we would have to push the extra
	 * memory down to after our max possible cluster size and then
	 * potentially pull it back up if the cluster was terminated
	 * prematurely--too much hassle.
	 */
	if (tbp->b_bcount != tbp->b_bufsize) {
		relpbuf(pb);
		++start_lbn;
		--len;
		bawrite(tbp);
		goto redo;
	}
	bp = pb;
	b_save = malloc(sizeof(struct buf *) * (len + 1) + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **) (b_save + 1);
	bp->b_saveaddr = b_save;
	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	if (tbp->b_flags & B_VMIO)
		bp->b_flags |= B_VMIO;

	bp->b_blkno = tbp->b_blkno;
	bp->b_lblkno = tbp->b_lblkno;
	bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER;
	bp->b_iodone = cluster_callback;
	pbgetvp(vp, bp);

	for (i = 0; i < len; ++i, ++start_lbn) {
		if (i != 0) {
			/*
			 * Block is not in core or the non-sequential block
			 * ending our cluster was part of the cluster (in
			 * which case we don't want to write it twice).
			 */
			if (!(tbp = incore(vp, start_lbn)) ||
			    (last_bp == NULL && start_lbn == lbn))
				break;

			if ((tbp->b_flags & (B_INVAL | B_CLUSTEROK)) != B_CLUSTEROK)
				break;

			if ((tbp->b_npages + bp->b_npages) > (MAXPHYS / PAGE_SIZE))
				break;

			/*
			 * Get the desired block buffer (unless it is the
			 * final sequential block whose buffer was passed in
			 * explicitly as last_bp).
			 */
			if (last_bp == NULL || start_lbn != lbn) {
				if (tbp->b_flags & B_BUSY)
					break;
				tbp = getblk(vp, start_lbn, size, 0, 0);
				if (!(tbp->b_flags & B_DELWRI) ||
				    ((tbp->b_flags & B_VMIO) != (bp->b_flags & B_VMIO))) {
					brelse(tbp);
					break;
				}
			} else
				tbp = last_bp;
		}
		for (j = 0; j < tbp->b_npages; j += 1) {
			bp->b_pages[j + bp->b_npages] = tbp->b_pages[j];
		}
		bp->b_npages += tbp->b_npages;
		bp->b_bcount += size;
		bp->b_bufsize += size;

		tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
		tbp->b_flags |= B_ASYNC;
		s = splbio();
		reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
		++tbp->b_vp->v_numoutput;
		splx(s);
		b_save->bs_children[i] = tbp;
	}
	b_save->bs_nchildren = i;
	pmap_qenter((vm_offset_t) bp->b_data, (vm_page_t *) bp->b_pages, bp->b_npages);
	bawrite(bp);

	if (i < len) {
		len -= i;
		goto redo;
	}
}

/*
 * Collect together all the buffers in a cluster, plus add one
 * additional buffer passed in by the caller (last_bp).
 */
struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED,
		    &buflist->bs_children[i]);
	buflist->bs_children[i] = last_bp;
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
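
/*
 * Note: cluster_collectbufs() has no callers within this file; it
 * appears to be retained for a block-reallocation path (cf. the
 * doreallocblks knob above) implemented elsewhere in this revision.
 */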
663