/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 *	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $Id: vfs_cluster.c,v 1.9 1995/01/24 10:00:46 davidg Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>

#ifdef DEBUG
#include <sys/sysctl.h>
int doreallocblks = 0;
struct ctldebug debug13 = {"doreallocblks", &doreallocblks};

#else
/* XXX for cluster_write */
#define doreallocblks 0
#endif

/*
 * Local declarations
 */
struct buf *cluster_rbuild __P((struct vnode *, u_quad_t, struct buf *,
	daddr_t, daddr_t, long, int, long));
void cluster_wbuild __P((struct vnode *, struct buf *, long, daddr_t,
	int, daddr_t));
struct cluster_save *cluster_collectbufs __P((struct vnode *, struct buf *));

int totreads;
int totreadblocks;

#ifdef DIAGNOSTIC
/*
 * Set to 1 if reads of block zero should cause readahead to be done;
 * set to 0 to treat a read of block zero as a non-sequential read.
 *
 * Setting to one assumes that most reads of block zero of files are due to
 * sequential passes over the files (e.g. cat, sum) where additional blocks
 * will soon be needed.  Setting to zero assumes that the majority are
 * surgical strikes to get particular info (e.g. size, file) where readahead
 * blocks will not be used and, in fact, push out other potentially useful
 * blocks from the cache.  The former seems intuitive, but some quick tests
 * showed that the latter performed better from a system-wide point of view.
 */
int doclusterraz = 0;

#define ISSEQREAD(vp, blk) \
	(((blk) != 0 || doclusterraz) && \
	 ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#else
#define ISSEQREAD(vp, blk) \
	(/* (blk) != 0 && */ ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#endif
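
/*
 * For example, with vp->v_lastr == 7 (the last logical block read on
 * this vnode):
 *
 *	ISSEQREAD(vp, 8)	-> true  (next block; sequential)
 *	ISSEQREAD(vp, 7)	-> true  (re-read of the same block)
 *	ISSEQREAD(vp, 42)	-> false (a seek; readahead will back off)
 *
 * The block numbers here are illustrative only.
 */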

/*
 * This replaces bread.  If this is a bread at the beginning of a file and
 * lastr is 0, we assume this is the first read and we'll read up to two
 * blocks if they are sequential.  After that, we'll do regular read ahead
 * in clustered chunks.
 *	bp is the block requested.
 *	rbp is the read-ahead block.
 *	If either is NULL, then you don't have to do the I/O.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp, *rbp;
	daddr_t blkno, rablkno, origlblkno;
	int error, num_ra, alreadyincore;

	origlblkno = lblkno;
	error = 0;
	/*
	 * get the requested block
	 */
	*bpp = bp = getblk(vp, lblkno, size, 0, 0);
	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!ISSEQREAD(vp, origlblkno)) {
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			vp->v_ralen >>= 1;
			return 0;
		} else if (vp->v_maxra >= origlblkno) {
			if ((vp->v_ralen + 1) < (MAXPHYS / size))
				vp->v_ralen++;
			if (vp->v_maxra >= (origlblkno + vp->v_ralen))
				return 0;
			lblkno = vp->v_maxra;
		}
		bp = NULL;
	} else {
		/*
		 * if it isn't in the cache, then get a chunk from disk if
		 * sequential, otherwise just get the block.
		 */
		bp->b_flags |= B_READ;
		lblkno += 1;
		curproc->p_stats->p_ru.ru_inblock++;	/* XXX */
	}
	/*
	 * if ralen is "none", then try a little
	 */
	if (vp->v_ralen == 0)
		vp->v_ralen = 1;
	/*
	 * assume no read-ahead
	 */
	alreadyincore = 1;
	rablkno = lblkno;

	/*
	 * if we have been doing sequential I/O, then do some read-ahead
	 */
	if (ISSEQREAD(vp, origlblkno)) {
		int i;

		/*
		 * this code makes sure that the stuff that we have read-ahead
		 * is still in the cache.  If it isn't, we have been reading
		 * ahead too much, and we need to back-off, otherwise we might
		 * try to read more.
		 */
		for (i = 0; i < vp->v_ralen; i++) {
			rablkno = lblkno + i;
			alreadyincore = (int) incore(vp, rablkno);
			if (!alreadyincore) {
				if (rablkno < vp->v_maxra) {
					vp->v_maxra = rablkno;
					vp->v_ralen >>= 1;
					alreadyincore = 1;
				} else {
					if (inmem(vp, rablkno)) {
						if (vp->v_maxra < rablkno)
							vp->v_maxra = rablkno + 1;
						continue;
					}
					if ((vp->v_ralen + 1) < MAXPHYS / size)
						vp->v_ralen++;
				}
				break;
			} else if (vp->v_maxra < rablkno) {
				vp->v_maxra = rablkno + 1;
			}
		}
	}
	/*
	 * we now build the read-ahead buffer if it is desirable.
	 */
	rbp = NULL;
	if (!alreadyincore &&
	    (rablkno + 1) * size <= filesize &&
	    !(error = VOP_BMAP(vp, rablkno, NULL, &blkno, &num_ra)) &&
	    blkno != -1) {
		if ((vp->v_ralen + 1) < MAXPHYS / size)
			vp->v_ralen++;
		if (num_ra > vp->v_ralen)
			num_ra = vp->v_ralen;

		if (num_ra) {
			rbp = cluster_rbuild(vp, filesize,
			    NULL, rablkno, blkno, size, num_ra, B_READ | B_ASYNC);
		} else {
			rbp = getblk(vp, rablkno, size, 0, 0);
			rbp->b_flags |= B_READ | B_ASYNC;
			rbp->b_blkno = blkno;
		}
	}

	/*
	 * if the synchronous read is a cluster, handle it, otherwise do a
	 * simple, non-clustered read.
	 */
	if (bp) {
		if (bp->b_flags & (B_DONE | B_DELWRI))
			panic("cluster_read: DONE bp");
		else {
			vfs_busy_pages(bp, 0);
			error = VOP_STRATEGY(bp);
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			totreads++;
			totreadblocks += bp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	/*
	 * and if we have read-aheads, do them too
	 */
	if (rbp) {
		vp->v_maxra = rbp->b_lblkno + rbp->b_bcount / size;
		if (error || (rbp->b_flags & B_CACHE)) {
			rbp->b_flags &= ~(B_ASYNC | B_READ);
			brelse(rbp);
		} else {
			vfs_busy_pages(rbp, 0);
			(void) VOP_STRATEGY(rbp);
			totreads++;
			totreadblocks += rbp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	if (bp && ((bp->b_flags & B_ASYNC) == 0))
		return (biowait(bp));
	return (error);
}
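
/*
 * Usage sketch for cluster_read() above (a hypothetical caller, modeled
 * on the FFS read path): a filesystem substitutes cluster_read() for
 * bread() when reading logical block "lbn" of an inode "ip"; both names
 * are assumed locals of the caller.
 *
 *	error = cluster_read(vp, ip->i_size, lbn, size, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 */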

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
struct buf *
cluster_rbuild(vp, filesize, bp, lbn, blkno, size, run, flags)
	struct vnode *vp;
	u_quad_t filesize;
	struct buf *bp;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	long flags;
{
	struct cluster_save *b_save;
	struct buf *tbp;
	daddr_t bn;
	int i, inc, j;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_rbuild: size %d != filesize %d\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
	if (size * (lbn + run + 1) > filesize)
		--run;
	if (run == 0) {
		if (!bp) {
			bp = getblk(vp, lbn, size, 0, 0);
			bp->b_blkno = blkno;
			bp->b_flags |= flags;
		}
		return (bp);
	}
	tbp = bp;
	if (!tbp) {
		tbp = getblk(vp, lbn, size, 0, 0);
	}
	if (tbp->b_flags & B_CACHE) {
		return (tbp);
	} else if (bp == NULL) {
		tbp->b_flags |= B_ASYNC;
	}
	bp = getpbuf();
	bp->b_flags = flags | B_CALL | B_BUSY | B_CLUSTER;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	pbgetvp(vp, bp);

	b_save = malloc(sizeof(struct buf *) * (run + 1) + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **) (b_save + 1);
	bp->b_saveaddr = b_save;

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	if (tbp->b_flags & B_VMIO)
		bp->b_flags |= B_VMIO;

	inc = btodb(size);
	for (bn = blkno, i = 0; i <= run; ++i, bn += inc) {
		if (i != 0) {
			tbp = getblk(vp, lbn + i, size, 0, 0);
			if ((tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) != (bp->b_flags & B_VMIO)) {
				brelse(tbp);
				break;
			}
			tbp->b_blkno = bn;
			tbp->b_flags |= flags | B_READ | B_ASYNC;
		} else {
			tbp->b_flags |= flags | B_READ;
		}
		++b_save->bs_nchildren;
		b_save->bs_children[i] = tbp;
		for (j = 0; j < tbp->b_npages; j += 1) {
			bp->b_pages[j + bp->b_npages] = tbp->b_pages[j];
		}
		bp->b_npages += tbp->b_npages;
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}
	pmap_qenter((vm_offset_t) bp->b_data, (vm_page_t *) bp->b_pages, bp->b_npages);
	return (bp);
}
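
/*
 * Example of the disk-block stepping in cluster_rbuild() above,
 * with illustrative values: for size = 8192 and DEV_BSIZE = 512,
 * inc = btodb(8192) = 16, so a run of 3 extra blocks starting at
 * disk block 1000 builds one cluster I/O whose child buffers sit
 * at disk blocks 1000, 1016, 1032 and 1048 (logical blocks
 * lbn .. lbn + 3).
 */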

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct cluster_save *b_save;
	struct buf **bpp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	b_save = (struct cluster_save *) (bp->b_saveaddr);
	pmap_qremove((vm_offset_t) bp->b_data, bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (bpp = b_save->bs_children; b_save->bs_nchildren--; ++bpp) {
		tbp = *bpp;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		}
		biodone(tbp);
	}
	free(b_save, M_SEGMENT);
	relpbuf(bp);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2. beginning of cluster - begin cluster
 *	3. middle of a cluster - add to cluster
 *	4. end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize)
	struct buf *bp;
	u_quad_t filesize;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;

	vp = bp->b_vp;
	lblocksize = vp->v_mount->mnt_stat.f_iosize;
	lbn = bp->b_lblkno;

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = MAXPHYS / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			cluster_wbuild(vp, NULL, lblocksize,
			    vp->v_cstart, cursize, lbn);
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((lbn + 1) * lblocksize != filesize &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out.
		 */
		cluster_wbuild(vp, bp, bp->b_bcount, vp->v_cstart,
		    vp->v_clen + 1, lbn);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}
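
/*
 * Worked example for cluster_write() above, with illustrative numbers:
 * assuming lblocksize = 8192 and MAXPHYS = 64K (so maxclen = 7), and
 * VOP_BMAP reporting at least 7 contiguous blocks, sequential writes
 * of logical blocks 0..8 proceed as:
 *
 *	lbn 0:    begins a cluster; v_cstart = 0, v_clen = 7, bdwrite
 *	lbn 1..6: middle of the cluster; each is bdwrite-delayed
 *	lbn 7:    lbn == v_cstart + v_clen, so cluster_wbuild() pushes
 *	          blocks 0..7 as one I/O; v_cstart = 8, v_clen = 0
 *	lbn 8:    starts the next cluster
 */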

/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
void
cluster_wbuild(vp, last_bp, size, start_lbn, len, lbn)
	struct vnode *vp;
	struct buf *last_bp;
	long size;
	daddr_t start_lbn;
	int len;
	daddr_t lbn;
{
	struct cluster_save *b_save;
	struct buf *bp, *tbp, *pb;
	int i, j, s;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_wbuild: size %d != filesize %d\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
redo:
	while ((!incore(vp, start_lbn) || start_lbn == lbn) && len) {
		++start_lbn;
		--len;
	}

	pb = (struct buf *) trypbuf();
	/* Get more memory for current buffer */
	if (len <= 1 || pb == NULL) {
		/* trypbuf() may fail; only release a buffer we actually got */
		if (pb != NULL)
			relpbuf(pb);
		if (last_bp) {
			bawrite(last_bp);
		} else if (len) {
			bp = getblk(vp, start_lbn, size, 0, 0);
			bawrite(bp);
		}
		return;
	}
	tbp = getblk(vp, start_lbn, size, 0, 0);
	if (!(tbp->b_flags & B_DELWRI)) {
		relpbuf(pb);
		++start_lbn;
		--len;
		brelse(tbp);
		goto redo;
	}
	/*
	 * Extra memory in the buffer, punt on this buffer. XXX we could
	 * handle this in most cases, but we would have to push the extra
	 * memory down to after our max possible cluster size and then
	 * potentially pull it back up if the cluster was terminated
	 * prematurely--too much hassle.
	 */
	if (tbp->b_bcount != tbp->b_bufsize) {
		relpbuf(pb);
		++start_lbn;
		--len;
		bawrite(tbp);
		goto redo;
	}
	bp = pb;
	b_save = malloc(sizeof(struct buf *) * (len + 1) + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **) (b_save + 1);
	bp->b_saveaddr = b_save;
	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	if (tbp->b_flags & B_VMIO)
		bp->b_flags |= B_VMIO;

	bp->b_blkno = tbp->b_blkno;
	bp->b_lblkno = tbp->b_lblkno;
	bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER;
	bp->b_iodone = cluster_callback;
	pbgetvp(vp, bp);

	for (i = 0; i < len; ++i, ++start_lbn) {
		if (i != 0) {
			/*
			 * Block is not in core or the non-sequential block
			 * ending our cluster was part of the cluster (in
			 * which case we don't want to write it twice).
			 */
			if (!(tbp = incore(vp, start_lbn)) ||
			    (last_bp == NULL && start_lbn == lbn))
				break;

			if ((tbp->b_flags & (B_INVAL | B_CLUSTEROK)) != B_CLUSTEROK)
				break;

			/*
			 * Get the desired block buffer (unless it is the
			 * final sequential block whose buffer was passed in
			 * explicitly as last_bp).
			 */
			if (last_bp == NULL || start_lbn != lbn) {
				if (tbp->b_flags & B_BUSY)
					break;
				tbp = getblk(vp, start_lbn, size, 0, 0);
				if (!(tbp->b_flags & B_DELWRI) ||
				    ((tbp->b_flags & B_VMIO) != (bp->b_flags & B_VMIO))) {
					brelse(tbp);
					break;
				}
			} else
				tbp = last_bp;
		}
		for (j = 0; j < tbp->b_npages; j += 1) {
			bp->b_pages[j + bp->b_npages] = tbp->b_pages[j];
		}
		bp->b_npages += tbp->b_npages;
		bp->b_bcount += size;
		bp->b_bufsize += size;

		tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
		tbp->b_flags |= B_ASYNC;
		s = splbio();
		reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
		++tbp->b_vp->v_numoutput;
		splx(s);
		b_save->bs_children[i] = tbp;
	}
	b_save->bs_nchildren = i;
	pmap_qenter((vm_offset_t) bp->b_data, (vm_page_t *) bp->b_pages, bp->b_npages);
	bawrite(bp);

	if (i < len) {
		len -= i;
		goto redo;
	}
}
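
/*
 * Layout note for the cluster_save allocations used in cluster_wbuild()
 * above and in cluster_rbuild(): the child-pointer array is carved out
 * of the same malloc block, immediately after the header, which is why
 * bs_children is set to (struct buf **) (b_save + 1):
 *
 *	+---------------------+--------------------------------+
 *	| struct cluster_save | struct buf *children[len + 1]  |
 *	+---------------------+--------------------------------+
 */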

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED,
		    &buflist->bs_children[i]);
	buflist->bs_children[i] = last_bp;
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
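
/*
 * Example for cluster_collectbufs() above (illustrative values): with
 * vp->v_cstart == 16 and vp->v_lastw == 19, len is 4, so the returned
 * list holds the buffers for logical blocks 16..19 (obtained via bread)
 * followed by last_bp, with bs_nchildren == 5.
 */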