/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $FreeBSD$
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
static int	rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
    "Debug VFS clustering code");
#endif
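
/*
 * Example (illustrative): with CLUSTERDEBUG compiled in, the knob above
 * appears at run time as "debug.rcluster", so "sysctl debug.rcluster=1"
 * is intended to turn the extra clustering diagnostics on.
 */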

static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs(struct vnode *vp, struct buf *last_bp);
static struct buf *
	cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
			 daddr_t blkno, long size, int run, struct buf *fbp);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

static int read_max = 8;
SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
    "Cluster read-ahead max block count");

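/*
 * Example (illustrative): both knobs above are ordinary read/write
 * sysctls, so e.g. "sysctl vfs.read_max=16" widens the read-ahead
 * window and "sysctl vfs.write_behind=0" disables write-behind.
 */
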
/* Page expended to mark partially backed buffers */
extern vm_page_t	bogus_page;

/*
 * Number of physical bufs (pbufs) this subsystem is allowed.
 * Manipulated by vm_pager.c
 */
extern int cluster_pbuf_freecnt;

/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	long totread;
	int seqcount;
	struct buf **bpp;
{
	struct buf *bp, *rbp, *reqbp;
	daddr_t blkno, origblkno;
	int maxra, racluster;
	int error, ncontig;
	int i;

	error = 0;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = seqcount;
	maxra = min(read_max, maxra);
	maxra = min(nbuf/8, maxra);
	if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
		maxra = (filesize / size) - lblkno;
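
	/*
	 * Worked example (illustrative): with size = 16384 and
	 * mnt_iosize_max = 131072, racluster is 8; with seqcount = 32,
	 * read_max = 8 and a large nbuf, maxra also settles at 8, and
	 * the EOF clip above then keeps the lblkno + maxra + 1 window
	 * from extending past the end of the file.
	 */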

	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0, 0);
	origblkno = lblkno;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			int s;
			bp->b_flags &= ~B_RAM;
			/*
			 * We do the spl here so that there is no window
			 * between the gbincore() lookup and the flag
			 * update below.  We opt to keep the spl out of
			 * the loop for efficiency.
			 */
			s = splbio();
			VI_LOCK(vp);
			for (i = 1; i < maxra; i++) {
				/*
				 * Stop if the buffer does not exist or it
				 * is invalid (about to go away?)
				 */
				rbp = gbincore(vp, lblkno+i);
				if (rbp == NULL || (rbp->b_flags & B_INVAL))
					break;

				/*
				 * Set another read-ahead mark so we know
				 * to check again.
				 */
				if (((i % racluster) == (racluster - 1)) ||
					(i == (maxra - 1)))
					rbp->b_flags |= B_RAM;
			}
			VI_UNLOCK(vp);
			splx(s);
			if (i >= maxra) {
				return 0;
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	/*
	 * If it isn't in the cache, then get a chunk from
	 * disk if sequential, otherwise just get the block.
	 */
	} else {
		off_t firstread = bp->b_offset;
		int nblks;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));

		ncontig = 0;

		/*
		 * Compute the total number of blocks that we should read
		 * synchronously.
		 */
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		nblks = howmany(totread, size);
		if (nblks > racluster)
			nblks = racluster;

		/*
		 * Now compute the number of contiguous blocks.
		 */
		if (nblks > 1) {
			error = VOP_BMAP(vp, lblkno, NULL,
				&blkno, &ncontig, NULL);
			/*
			 * If this failed to map just do the original block.
			 */
			if (error || blkno == -1)
				ncontig = 0;
		}

		/*
		 * If we have contiguous data available do a cluster
		 * otherwise just read the requested block.
		 */
		if (ncontig) {
			/* Account for our first block. */
			ncontig = min(ncontig + 1, nblks);
			if (ncontig < nblks)
				nblks = ncontig;
			bp = cluster_rbuild(vp, filesize, lblkno,
				blkno, size, nblks, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * handle the synchronous read so that it is available ASAP.
	 */
	if (bp) {
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		error = VOP_STRATEGY(vp, bp);
		curproc->p_stats->p_ru.ru_inblock++;
		if (error)
			return (error);
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 */
	while (lblkno < (origblkno + maxra)) {
		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
		if (error)
			break;

		if (blkno == -1)
			break;

		/*
		 * We could throttle ncontig here by maxra but we might as
		 * well read the data if it is contiguous.  We're throttled
		 * by racluster anyway.
		 */
		if (ncontig) {
			ncontig = min(ncontig + 1, racluster);
			rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
				size, ncontig, NULL);
			lblkno += (rbp->b_bufsize / size);
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
		} else {
			rbp = getblk(vp, lblkno, size, 0, 0, 0);
			lblkno += 1;
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
			rbp->b_flags |= B_ASYNC | B_RAM;
			rbp->b_iocmd = BIO_READ;
			rbp->b_blkno = blkno;
		}
		if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
			continue;
		}
		if ((rbp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(rbp, 0);
		}
		rbp->b_flags &= ~B_INVAL;
		rbp->b_ioflags &= ~BIO_ERROR;
		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
			BUF_KERNPROC(rbp);
		(void) VOP_STRATEGY(vp, rbp);
		curproc->p_stats->p_ru.ru_inblock++;
	}

	if (reqbp)
		return (bufwait(reqbp));
	else
		return (error);
}
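
/*
 * Illustrative sketch (not compiled): the shape of a file system read
 * path choosing between cluster_read() and plain bread().  The names
 * fs_read_block and doclusterread are placeholders, not part of this
 * file; only the cluster_read()/bread() calls themselves are real
 * interfaces.
 */
#if 0
static int
fs_read_block(struct vnode *vp, u_quad_t filesize, daddr_t lbn, long size,
    long bytesleft, int seqcount, struct buf **bpp)
{

	if (seqcount > 1 && doclusterread)
		/* Sequential access: fetch the block and prime read-ahead. */
		return (cluster_read(vp, filesize, lbn, size, NOCRED,
		    bytesleft, seqcount, bpp));
	/* Random access: just fetch the one block. */
	return (bread(vp, lbn, size, NOCRED, bpp));
}
#endif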

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	struct buf *fbp;
{
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

	GIANT_REQUIRED;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != f_iosize %ld\n",
	    size, vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0, 0);
		if (tbp->b_flags & B_CACHE)
			return tbp;
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}

	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return tbp;

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return tbp;

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
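	/*
	 * E.g. (illustrative): with 4K pages and tbp->b_data ==
	 * 0xc1234800, the sub-page offset 0x800 is OR'd into the
	 * page-aligned pbuf mapping so byte offsets line up.
	 */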
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT);

			/* Don't wait around for locked bufs. */
			if (tbp == NULL)
				break;

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
				(tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0; j < tbp->b_npages; j++) {
				if (tbp->b_pages[j]->valid)
					break;
			}
			if (j != tbp->b_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the buffer up for an async read (XXX should
			 * we do this only if we do not wind up brelse()ing?).
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			tbp, b_cluster.cluster_entry);
		vm_page_lock_queues();
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_npages == 0) ||
				(bp->b_pages[bp->b_npages-1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		vm_page_unlock_queues();
		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %ld vs %ld\n",
			    tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n",
			    tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	for (j = 0; j < bp->b_npages; j++) {
		if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_pages[j] = bogus_page;
		}
	}
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);
	bp->b_kvasize = bp->b_bufsize;

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		(vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}
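
/*
 * Worked example (illustrative) of the EOF clamp at the top of
 * cluster_rbuild(): with size = 16384 and filesize = 180224 (11
 * blocks, lbn 0..10), a request for lbn = 10, run = 4 is trimmed
 * until size * (lbn + run) <= filesize, leaving run = 1, so only the
 * final block before EOF is read.
 */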

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct buf *nbp, *tbp;
	int error = 0;

	GIANT_REQUIRED;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
		tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		bufdone(tbp);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *	cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *		write_behind = 0	write behind disabled
 *		write_behind = 1	write behind normal (default)
 *		write_behind = 2	write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len)
{
	int r = 0;

	switch (write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* FALLTHROUGH */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len);
		/* FALLTHROUGH */
	default:
		break;
	}
	return (r);
}
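
/*
 * Worked example (illustrative): with write_behind = 2, a call with
 * start_lbn = 32 and len = 4 first checks start_lbn >= len, backs the
 * window off to start_lbn = 28, and falls through to issue
 * cluster_wbuild(vp, size, 28, 4), leaving the most recent four
 * buffers delayed in the hope of further clustering.
 */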

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize, seqcount)
	struct buf *bp;
	u_quad_t filesize;
	int seqcount;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG) {
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster. Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
						vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster. If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
			    vp->v_clen + 1);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}
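
/*
 * Illustrative sketch (not compiled): the shape of a file system write
 * path handing a freshly filled block to the clustering code.  The
 * names fs_write_block and doclusterwrite are placeholders; the
 * cluster_write()/bawrite()/bdwrite() calls are the real interfaces.
 */
#if 0
static void
fs_write_block(struct vnode *vp, struct buf *bp, u_quad_t filesize,
    int ioflag, int seqcount)
{

	if (ioflag & IO_SYNC)
		(void) bwrite(bp);	/* caller wants it on disk now */
	else if (doclusterwrite)
		cluster_write(bp, filesize, seqcount);
	else if (seqcount > 1)
		bawrite(bp);		/* async write, no clustering */
	else
		bdwrite(bp);		/* delay in the hope of clustering */
}
#endif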
735 
/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Write out all of the clusterable, delayed-write buffers in the range
 * [start_lbn, start_lbn + len), combining runs of adjacent dirty buffers
 * into single large writes where possible.
 */
int
cluster_wbuild(vp, size, start_lbn, len)
	struct vnode *vp;
	long size;
	daddr_t start_lbn;
	int len;
{
	struct buf *bp, *tbp;
	int i, j, s;
	int totalwritten = 0;
	int dbsize = btodb(size);

	GIANT_REQUIRED;

	while (len > 0) {
		s = splbio();
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		VI_LOCK(vp);
		if ((tbp = gbincore(vp, start_lbn)) == NULL) {
			VI_UNLOCK(vp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if (BUF_LOCK(tbp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, VI_MTX(vp))) {
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if ((tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
		    B_DELWRI) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;
		splx(s);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		     (B_CLUSTEROK | B_VMIO)) ||
		  (tbp->b_bcount != tbp->b_bufsize) ||
		  (tbp->b_bcount != size) ||
		  (len == 1) ||
		  ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}
		/*
		 * We got a pbuf to make the cluster in, so initialize it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_magic = tbp->b_magic;
		bp->b_op = tbp->b_op;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED)
			bp->b_wcred = crhold(tbp->b_wcred);

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags |= B_CLUSTER |
				(tbp->b_flags & (B_VMIO | B_NEEDCOMMIT | B_NOWDRAIN));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				s = splbio();
				/*
				 * If the adjacent data is not even in core it
				 * can't need to be written.
				 */
				VI_LOCK(vp);
				if ((tbp = gbincore(vp, start_lbn)) == NULL) {
					VI_UNLOCK(vp);
					splx(s);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if (BUF_LOCK(tbp,
				    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
				    VI_MTX(vp))) {
					splx(s);
					break;
				}

				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED) ||
				    tbp->b_wcred != bp->b_wcred) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				  ((bp->b_blkno + (dbsize * i)) !=
				    tbp->b_blkno) ||
				  ((tbp->b_npages + bp->b_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
				splx(s);
			} /* end of code for non-first buffers only */
			/* check for latent dependencies to be handled */
			if ((LIST_FIRST(&tbp->b_dep)) != NULL)
				buf_start(tbp);
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}
				vm_page_lock_queues();
				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_npages == 0) ||
					  (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
				vm_page_unlock_queues();
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;

			s = splbio();
			bundirty(tbp);
			tbp->b_flags &= ~B_DONE;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_WRITE;
			reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
			VI_LOCK(tbp->b_vp);
			++tbp->b_vp->v_numoutput;
			VI_UNLOCK(tbp->b_vp);
			splx(s);
			BUF_KERNPROC(tbp);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
				tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
			(vm_page_t *) bp->b_pages, bp->b_npages);
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		bp->b_kvasize = bp->b_bufsize;
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return totalwritten;
}
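
/*
 * Illustrative note: the other main consumer of cluster_wbuild() is
 * the async-write path (vfs_bio_awrite() in vfs_bio.c), which scans
 * for B_DELWRI | B_CLUSTEROK neighbors of a dirty buffer and then,
 * roughly, issues one call covering the whole contiguous range, e.g.
 *
 *	nwritten = cluster_wbuild(vp, size, lblkno - nbehind,
 *	    nbehind + nahead + 1);
 *
 * where nbehind/nahead are placeholder neighbor counts; the scan
 * details here are a simplification.
 */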

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
				NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
			NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
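
/*
 * Layout note (illustrative) for the single allocation above: the
 * header and its child pointer array come from one malloc() call,
 *
 *	+---------------------+----------------------------------+
 *	| struct cluster_save | struct buf *bs_children[len + 1] |
 *	+---------------------+----------------------------------+
 *
 * with bs_children pointed just past the header, so a single
 * free(buflist, M_SEGMENT) releases both.
 */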
1013