/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $Id: vfs_cluster.c,v 1.7 1994/12/18 03:05:49 davidg Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <miscfs/specfs/specdev.h>

#ifdef DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
int doreallocblks = 0;
struct ctldebug debug13 = {"doreallocblks", &doreallocblks};

#else
/* XXX for cluster_write */
#define doreallocblks 0
#endif

/*
 * Local declarations
 */
struct buf *cluster_rbuild __P((struct vnode *, u_quad_t, struct buf *,
    daddr_t, daddr_t, long, int, long));
void cluster_wbuild __P((struct vnode *, struct buf *, long, daddr_t, int, daddr_t));
struct cluster_save *cluster_collectbufs __P((struct vnode *, struct buf *));

int totreads;
int totreadblocks;

#ifdef DIAGNOSTIC
/*
 * Set to 1 if reads of block zero should cause readahead to be done.
 * Set to 0 treats a read of block zero as a non-sequential read.
 *
 * Setting to one assumes that most reads of block zero of files are due to
 * sequential passes over the files (e.g. cat, sum) where additional blocks
 * will soon be needed.  Setting to zero assumes that the majority are
 * surgical strikes to get particular info (e.g. size, file) where readahead
 * blocks will not be used and, in fact, push out other potentially useful
 * blocks from the cache.  The former seems intuitive, but some quick tests
 * showed that the latter performed better from a system-wide point of view.
 */
	int doclusterraz = 0;

#define ISSEQREAD(vp, blk) \
	(((blk) != 0 || doclusterraz) && \
	 ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#else
#define ISSEQREAD(vp, blk) \
	((blk) != 0 && ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#endif
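
/*
 * Example (illustrative): with vp->v_lastr == 7, a read of logical block 7
 * (a re-read) or block 8 (the next block) satisfies ISSEQREAD and keeps
 * read-ahead active; a read of block 3 does not, and cluster_read() will
 * halve vp->v_ralen in response.  A read of block zero only counts as
 * sequential when doclusterraz is set (DIAGNOSTIC kernels).
 */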

/*
 * This replaces bread.  If this is a bread at the beginning of a file and
 * lastr is 0, we assume this is the first read and we'll read up to two
 * blocks if they are sequential.  After that, we'll do regular read ahead
 * in clustered chunks.
 * 	bp is the block requested.
 *	rbp is the read-ahead block.
 *	If either is NULL, the corresponding I/O is not needed.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp, *rbp;
	daddr_t blkno, rablkno, origlblkno;
	int error, num_ra, alreadyincore;

	origlblkno = lblkno;
	error = 0;
	/*
	 * get the requested block
	 */
	*bpp = bp = getblk(vp, lblkno, size, 0, 0);
	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!ISSEQREAD(vp, origlblkno)) {
			vp->v_ralen >>= 1;
			return 0;
		}
		bp = NULL;
	} else {
		/*
		 * if it isn't in the cache, then get a chunk from disk if
		 * sequential, otherwise just get the block.
		 */
		bp->b_flags |= B_READ;
		lblkno += 1;
		curproc->p_stats->p_ru.ru_inblock++;	/* XXX */
	}
	/*
	 * if the read-ahead length is zero, start with a single block
	 */
	if (vp->v_ralen == 0)
		vp->v_ralen = 1;
	/*
	 * assume no read-ahead is needed
	 */
	alreadyincore = 1;
	rablkno = lblkno;

	/*
	 * if we have been doing sequential I/O, then do some read-ahead
	 */
	if (ISSEQREAD(vp, origlblkno)) {
		int i;

		/*
		 * this code makes sure that the stuff that we have read-ahead
		 * is still in the cache.  If it isn't, we have been reading
		 * ahead too much, and we need to back-off, otherwise we might
		 * try to read more.
		 */
		for (i = 0; i < vp->v_ralen; i++) {
			rablkno = lblkno + i;
			alreadyincore = (int) incore(vp, rablkno);
			if (!alreadyincore) {
				if (rablkno < vp->v_maxra) {
					vp->v_maxra = rablkno;
					vp->v_ralen >>= 1;
					alreadyincore = 1;
				} else {
					if (inmem(vp, rablkno))
						continue;
					if ((vp->v_ralen + 1) < MAXPHYS / size)
						vp->v_ralen++;
				}
				break;
			}
		}
	}
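	/*
	 * Window dynamics, by example (illustrative only): with 8K blocks
	 * and a 64K MAXPHYS, v_ralen can grow by one block per pass up to
	 * MAXPHYS / size - 1 = 7 blocks.  If a block below v_maxra (the
	 * high-water mark of prior read-ahead) has already been evicted,
	 * the window is halved instead, since we were reading ahead
	 * further than the cache could hold.
	 */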
	/*
	 * we now build the read-ahead buffer if it is desirable.
	 */
	rbp = NULL;
	if (!alreadyincore &&
	    (rablkno + 1) * size <= filesize &&
	    !(error = VOP_BMAP(vp, rablkno, NULL, &blkno, &num_ra)) &&
	    blkno != -1) {
		if (num_ra > vp->v_ralen)
			num_ra = vp->v_ralen;

		if (num_ra &&
		    ((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_reserved)) {
			rbp = cluster_rbuild(vp, filesize,
			    NULL, rablkno, blkno, size, num_ra, B_READ | B_ASYNC);
		} else {
			rbp = getblk(vp, rablkno, size, 0, 0);
			rbp->b_flags |= B_READ | B_ASYNC;
			rbp->b_blkno = blkno;
		}
	}
	/*
	 * if the synchronous read is a cluster, handle it, otherwise do a
	 * simple, non-clustered read.
	 */
	if (bp) {
		if (bp->b_flags & (B_DONE | B_DELWRI))
			panic("cluster_read: DONE bp");
		else {
			vfs_busy_pages(bp, 0);
			error = VOP_STRATEGY(bp);
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			totreads++;
			totreadblocks += bp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	/*
	 * and if we have read-aheads, do them too
	 */
	if (rbp) {
		if (error || (rbp->b_flags & B_CACHE)) {
			rbp->b_flags &= ~(B_ASYNC | B_READ);
			brelse(rbp);
		} else {
			vfs_busy_pages(rbp, 0);
			(void) VOP_STRATEGY(rbp);
			vp->v_maxra = rbp->b_lblkno + rbp->b_bcount / size;
			totreads++;
			totreadblocks += rbp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	if (bp)
		return (biowait(bp));
	return (error);
}
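
/*
 * Typical use (a sketch, not copied from any particular caller): a
 * filesystem's read path substitutes cluster_read() where it would have
 * used bread().  The names ip, i_size and doclusterread below stand in
 * for whatever the caller actually has:
 *
 *	if (doclusterread)
 *		error = cluster_read(vp, ip->i_size, lbn, size, NOCRED, &bp);
 *	else
 *		error = bread(vp, lbn, size, NOCRED, &bp);
 */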

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
struct buf *
cluster_rbuild(vp, filesize, bp, lbn, blkno, size, run, flags)
	struct vnode *vp;
	u_quad_t filesize;
	struct buf *bp;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	long flags;
{
	struct cluster_save *b_save;
	struct buf *tbp;
	daddr_t bn;
	int i, inc, j;
#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_rbuild: size %ld != f_iosize %ld\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
	if (size * (lbn + run + 1) > filesize)
		--run;
	if (run == 0) {
		if (!bp) {
			bp = getblk(vp, lbn, size, 0, 0);
			bp->b_blkno = blkno;
			bp->b_flags |= flags;
		}
		return (bp);
	}
	tbp = bp;
	if (!tbp) {
		tbp = getblk(vp, lbn, size, 0, 0);
	}
	if (tbp->b_flags & B_CACHE) {
		return (tbp);
	} else if (bp == NULL) {
		tbp->b_flags |= B_ASYNC;
	}
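	/*
	 * Borrow a pbuf to serve as the glue buffer: it carries the one
	 * large physical transfer while the per-block buffers gathered
	 * below keep their identities in the buffer cache and are
	 * completed individually by cluster_callback().
	 */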
	bp = getpbuf();
	bp->b_flags = flags | B_CALL | B_BUSY | B_CLUSTER;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	pbgetvp(vp, bp);

	b_save = malloc(sizeof(struct buf *) * (run + 1) + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **) (b_save + 1);
	bp->b_saveaddr = b_save;

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	if (tbp->b_flags & B_VMIO)
		bp->b_flags |= B_VMIO;

	inc = btodb(size);
	for (bn = blkno, i = 0; i <= run; ++i, bn += inc) {
		if (i != 0) {
			if (inmem(vp, lbn + i)) {
				break;
			}
			tbp = getblk(vp, lbn + i, size, 0, 0);
			if ((tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) != (bp->b_flags & B_VMIO)) {
				brelse(tbp);
				break;
			}
			tbp->b_blkno = bn;
			tbp->b_flags |= flags | B_READ | B_ASYNC;
		} else {
			tbp->b_flags |= flags | B_READ;
		}
		++b_save->bs_nchildren;
		b_save->bs_children[i] = tbp;
		for (j = 0; j < tbp->b_npages; j += 1) {
			bp->b_pages[j + bp->b_npages] = tbp->b_pages[j];
		}
		bp->b_npages += tbp->b_npages;
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}
	pmap_qenter(bp->b_data, bp->b_pages, bp->b_npages);
	return (bp);
}
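
/*
 * Sizing example (illustrative): on an 8K-block filesystem,
 * inc = btodb(8192) = 16 DEV_BSIZE sectors, so with run = 3 the loop
 * above can stitch four contiguous blocks (lbn .. lbn+3 at disk
 * addresses blkno, blkno+16, blkno+32, blkno+48) into a single 32K
 * transfer, which cluster_callback() later completes per-block.
 */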

/*
 * Cleanup after a clustered read or write.
 * The component buffers share the cluster buffer's pages, so completion
 * only needs to propagate status and finish each child buffer.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct cluster_save *b_save;
	struct buf **bpp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;


	b_save = (struct cluster_save *) (bp->b_saveaddr);
	pmap_qremove(bp->b_data, bp->b_npages);
	/*
	 * The component buffers already share the cluster buffer's pages,
	 * so just propagate status and mark I/O as done on each of them.
	 */
	for (bpp = b_save->bs_children; b_save->bs_nchildren--; ++bpp) {
		tbp = *bpp;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		}
		biodone(tbp);
	}
	free(b_save, M_SEGMENT);
	relpbuf(bp);
}
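
/*
 * The hand-off to cluster_callback() relies on the B_CALL convention:
 * when the driver finishes the cluster I/O, biodone() sees B_CALL and
 * invokes b_iodone instead of waking a sleeping reader.  Roughly (a
 * paraphrase of the biodone() logic, not a verbatim copy):
 *
 *	if (bp->b_flags & B_CALL) {
 *		bp->b_flags &= ~B_CALL;
 *		(*bp->b_iodone)(bp);
 *		return;
 *	}
 */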

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize)
	struct buf *bp;
	u_quad_t filesize;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;

	vp = bp->b_vp;
	lblocksize = vp->v_mount->mnt_stat.f_iosize;
	lbn = bp->b_lblkno;

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = MAXPHYS / lblocksize;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			cluster_wbuild(vp, NULL, lblocksize,
			    vp->v_cstart, cursize, lbn);
		}
		/*
		 * Consider beginning a cluster. If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((lbn + 1) * lblocksize != filesize &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen) ||
			bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out.
		 */
		cluster_wbuild(vp, bp, bp->b_bcount, vp->v_cstart,
		    vp->v_clen + 1, lbn);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}
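
/*
 * Worked example (illustrative): a process appends 8K blocks 0, 1, 2, ...
 * At lbn 0 cluster_write() begins a cluster; suppose VOP_BMAP reports
 * seven more contiguous blocks, so v_cstart = 0 and v_clen = 7.  Blocks
 * 1 through 6 each take the bdwrite() middle case, and at
 * lbn 7 == v_cstart + v_clen all eight blocks go out in a single
 * cluster_wbuild() call.  A seek elsewhere instead flushes the partial
 * cluster and starts a new one.
 */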

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the cluster being built (if last_bp == NULL).
 */
void
cluster_wbuild(vp, last_bp, size, start_lbn, len, lbn)
	struct vnode *vp;
	struct buf *last_bp;
	long size;
	daddr_t start_lbn;
	int len;
	daddr_t lbn;
{
	struct cluster_save *b_save;
	struct buf *bp, *tbp;
	int i, j, s;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_wbuild: size %ld != f_iosize %ld\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
redo:
	while ((!incore(vp, start_lbn) || start_lbn == lbn) && len) {
		++start_lbn;
		--len;
	}

	/* Too little left to cluster: write out what remains directly. */
	if (len <= 1) {
		if (last_bp) {
			bawrite(last_bp);
		} else if (len) {
			bp = getblk(vp, start_lbn, size, 0, 0);
			bawrite(bp);
		}
		return;
	}
	tbp = getblk(vp, start_lbn, size, 0, 0);
	if (!(tbp->b_flags & B_DELWRI)) {
		++start_lbn;
		--len;
		brelse(tbp);
		goto redo;
	}
	/*
	 * Extra memory in the buffer, punt on this buffer. XXX we could
	 * handle this in most cases, but we would have to push the extra
	 * memory down to after our max possible cluster size and then
	 * potentially pull it back up if the cluster was terminated
	 * prematurely--too much hassle.
	 */
	if (tbp->b_bcount != tbp->b_bufsize) {
		++start_lbn;
		--len;
		bawrite(tbp);
		goto redo;
	}
	bp = getpbuf();
	b_save = malloc(sizeof(struct buf *) * (len + 1) + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **) (b_save + 1);
	bp->b_saveaddr = b_save;
	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	if (tbp->b_flags & B_VMIO)
		bp->b_flags |= B_VMIO;

	bp->b_blkno = tbp->b_blkno;
	bp->b_lblkno = tbp->b_lblkno;
	bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER;
	bp->b_iodone = cluster_callback;
	pbgetvp(vp, bp);

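	/*
	 * Absorb each eligible delayed-write buffer in the range into the
	 * cluster, sharing its pages; a gap in the cache, a busy or
	 * non-clusterable buffer, or a B_VMIO mismatch ends the run.
	 */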
	for (i = 0; i < len; ++i, ++start_lbn) {
		if (i != 0) {
			/*
			 * Block is not in core or the non-sequential block
			 * ending our cluster was part of the cluster (in
			 * which case we don't want to write it twice).
			 */
			if (!(tbp = incore(vp, start_lbn)) ||
			    (last_bp == NULL && start_lbn == lbn))
				break;

			if ((tbp->b_flags & (B_INVAL | B_BUSY | B_CLUSTEROK)) != B_CLUSTEROK)
				break;

			/*
			 * Get the desired block buffer (unless it is the
			 * final sequential block whose buffer was passed in
			 * explicitly as last_bp).
			 */
			if (last_bp == NULL || start_lbn != lbn) {
				tbp = getblk(vp, start_lbn, size, 0, 0);
				if (!(tbp->b_flags & B_DELWRI) ||
				    ((tbp->b_flags & B_VMIO) != (bp->b_flags & B_VMIO))) {
					brelse(tbp);
					break;
				}
			} else
				tbp = last_bp;
		}
		for (j = 0; j < tbp->b_npages; j += 1) {
			bp->b_pages[j + bp->b_npages] = tbp->b_pages[j];
		}
		bp->b_npages += tbp->b_npages;
		bp->b_bcount += size;
		bp->b_bufsize += size;

		tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
		tbp->b_flags |= B_ASYNC;
		s = splbio();
		reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
		++tbp->b_vp->v_numoutput;
		splx(s);
		b_save->bs_children[i] = tbp;
	}
	b_save->bs_nchildren = i;
	pmap_qenter(bp->b_data, bp->b_pages, bp->b_npages);
	bawrite(bp);

	if (i < len) {
		len -= i;
		goto redo;
	}
}
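
/*
 * Walk-through (illustrative): asked to write lbns 10-17 with lbn 14 not
 * in core, the code above clusters 10-13 into one pbuf and bawrite()s it,
 * then takes the redo path, skips the gap at 14, and clusters 15-17 as a
 * second write.
 */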

/*
 * Collect together all the buffers in a cluster, plus one additional
 * buffer passed in by the caller (last_bp).
 */
struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED,
		    &buflist->bs_children[i]);
	buflist->bs_children[i] = last_bp;
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
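
/*
 * Historical note: in the 4.4BSD code this file derives from,
 * cluster_write() used this helper when doreallocblks was enabled,
 * handing the list to VOP_REALLOCBLKS() so the filesystem could make
 * the dirty cluster physically contiguous before it was written.
 */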
626