/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $FreeBSD$
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#if defined(CLUSTERDEBUG)
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs __P((struct vnode *vp, struct buf *last_bp));
static struct buf *
	cluster_rbuild __P((struct vnode *vp, u_quad_t filesize, daddr_t lbn,
	    daddr_t blkno, long size, int run, struct buf *fbp));

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0, "");

extern vm_page_t bogus_page;

extern int cluster_pbuf_freecnt;

/*
 * Maximum number of blocks for read-ahead.
 */
#define MAXRA 32

/*
 * This replaces bread.
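 *
 * In outline: the requested block is obtained with getblk().  If it is
 * already cached, we only re-arm the B_RAM read-ahead marks on the
 * in-core buffers ahead of it; otherwise the block is mapped with
 * VOP_BMAP() and, if the following blocks are contiguous on disk, read
 * as a single cluster via cluster_rbuild().  When the access pattern
 * is sequential (seqcount), an additional asynchronous read-ahead
 * cluster is issued.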
 */
int
cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	long totread;
	int seqcount;
	struct buf **bpp;
{
	struct buf *bp, *rbp, *reqbp;
	daddr_t blkno, origblkno;
	int error, num_ra;
	int i;
	int maxra, racluster;
	long origtotread;

	error = 0;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = 2 * racluster + (totread / size);
	if (maxra > MAXRA)
		maxra = MAXRA;
	if (maxra > nbuf/8)
		maxra = nbuf/8;

	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0);
	origblkno = lblkno;
	origtotread = totread;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			int s;
			struct buf *tbp;
			bp->b_flags &= ~B_RAM;
			/*
			 * We do the spl here so that there is no window
			 * between the incore() check and the B_RAM flag
			 * update below.  We opt to keep the spl out of the
			 * loop for efficiency.
			 */
			s = splbio();
			for (i = 1; i < maxra; i++) {

				if (!(tbp = incore(vp, lblkno + i))) {
					break;
				}

				/*
				 * Set another read-ahead mark so we know
				 * to check again.
				 */
				if (((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
					tbp->b_flags |= B_RAM;
			}
			splx(s);
			if (i >= maxra) {
				return 0;
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	} else {
		off_t firstread = bp->b_offset;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		if (totread > size) {
			int nblks = 0;
			int ncontigafter;
			while (totread > 0) {
				nblks++;
				totread -= size;
			}
			if (nblks == 1)
				goto single_block_read;
			if (nblks > racluster)
				nblks = racluster;

			error = VOP_BMAP(vp, lblkno, NULL,
			    &blkno, &ncontigafter, NULL);
			if (error)
				goto single_block_read;
			if (blkno == -1)
				goto single_block_read;
			if (ncontigafter == 0)
				goto single_block_read;
			if (ncontigafter + 1 < nblks)
				nblks = ncontigafter + 1;

			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
single_block_read:
			/*
			 * if it isn't in the cache, then get a chunk from
			 * disk if sequential, otherwise just get the block.
			 */
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * if we have been doing sequential I/O, then do some read-ahead
	 */
	rbp = NULL;
	if (seqcount && (lblkno < (origblkno + seqcount))) {
		/*
		 * we now build the read-ahead buffer if it is desirable.
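		 * The amount of read-ahead is clamped both by the caller's
		 * seqcount and by the length of the contiguous run that
		 * VOP_BMAP() reports; a run of a single block degenerates
		 * to a plain asynchronous getblk() read.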
		 */
		if (((u_quad_t)(lblkno + 1) * size) <= filesize &&
		    !(error = VOP_BMAP(vp, lblkno, NULL, &blkno, &num_ra, NULL)) &&
		    blkno != -1) {
			int nblksread;
			int ntoread = num_ra + 1;
			nblksread = (origtotread + size - 1) / size;
			if (seqcount < nblksread)
				seqcount = nblksread;
			if (seqcount < ntoread)
				ntoread = seqcount;
			if (num_ra) {
				rbp = cluster_rbuild(vp, filesize, lblkno,
				    blkno, size, ntoread, NULL);
			} else {
				rbp = getblk(vp, lblkno, size, 0, 0);
				rbp->b_flags |= B_ASYNC | B_RAM;
				rbp->b_iocmd = BIO_READ;
				rbp->b_blkno = blkno;
			}
		}
	}

	/*
	 * handle the synchronous read
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			printf("S(%ld,%ld,%d) ",
			    (long)bp->b_lblkno, bp->b_bcount, seqcount);
#endif
		if ((bp->b_flags & B_CLUSTER) == 0)
			vfs_busy_pages(bp, 0);
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		error = VOP_STRATEGY(vp, bp);
		curproc->p_stats->p_ru.ru_inblock++;
	}

	/*
	 * and if we have read-aheads, do them too
	 */
	if (rbp) {
		if (error) {
			rbp->b_flags &= ~B_ASYNC;
			brelse(rbp);
		} else if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
		} else {
#if defined(CLUSTERDEBUG)
			if (rcluster) {
				if (bp)
					printf("A+(%ld,%ld,%ld,%d) ",
					    (long)rbp->b_lblkno, rbp->b_bcount,
					    (long)(rbp->b_lblkno - origblkno),
					    seqcount);
				else
					printf("A(%ld,%ld,%ld,%d) ",
					    (long)rbp->b_lblkno, rbp->b_bcount,
					    (long)(rbp->b_lblkno - origblkno),
					    seqcount);
			}
#endif

			if ((rbp->b_flags & B_CLUSTER) == 0)
				vfs_busy_pages(rbp, 0);
			rbp->b_flags &= ~B_INVAL;
			rbp->b_ioflags &= ~BIO_ERROR;
			if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
				BUF_KERNPROC(rbp);
			(void) VOP_STRATEGY(vp, rbp);
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	if (reqbp)
		return (bufwait(reqbp));
	else
		return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
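 *
 * A pbuf obtained with trypbuf() describes the whole run; the
 * component buffers are chained on b_cluster.cluster_head and are
 * completed individually by cluster_callback().  Pages that are
 * already fully valid are redirected to bogus_page for the transfer
 * so that their contents are not overwritten.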
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	struct buf *fbp;
{
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != f_iosize %ld\n",
	    size, vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0);
		if (tbp->b_flags & B_CACHE)
			return tbp;
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}

	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return tbp;

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return tbp;

	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max)
				break;

			if ((tbp = incore(vp, lbn + i)) != NULL) {
				if (BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT))
					break;
				BUF_UNLOCK(tbp);

				for (j = 0; j < tbp->b_npages; j++)
					if (tbp->b_pages[j]->valid)
						break;

				if (j != tbp->b_npages)
					break;

				if (tbp->b_bcount != size)
					break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0);

			if ((tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			for (j = 0; j < tbp->b_npages; j++)
				if (tbp->b_pages[j]->valid)
					break;

			if (j != tbp->b_npages) {
				bqrelse(tbp);
				break;
			}

			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
		    tbp, b_cluster.cluster_entry);
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages - 1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		bp->b_bcount += tbp->b_bcount;
		bp->b_bufsize += tbp->b_bufsize;
	}

	for (j = 0; j < bp->b_npages; j++) {
		if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL)
			bp->b_pages[j] = bogus_page;
	}
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);
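
	/*
	 * Trim the advertised KVA to the size actually built, and map the
	 * collected pages contiguously behind b_data so the cluster can be
	 * issued as a single transfer.
	 */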
	bp->b_kvasize = bp->b_bufsize;

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
	    (vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct buf *nbp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	    tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
		}
		bufdone(tbp);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *	cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *		write_behind = 0	write behind disabled
 *		write_behind = 1	write behind normal (default)
 *		write_behind = 2	write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len)
{
	int r = 0;

	switch (write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* FALLTHROUGH */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len);
		/* FALLTHROUGH */
	default:
		break;
	}
	return (r);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1.	Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize, seqcount)
	struct buf *bp;
	u_quad_t filesize;
	int seqcount;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG) {
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
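			 *
			 * (Cluster state is kept in the vnode: v_cstart is
			 * the first block of the cluster being built,
			 * v_lastw and v_lasta are the last logical and disk
			 * blocks written, and v_clen is the maximum cluster
			 * length; a cluster is pushed once it spans
			 * v_clen + 1 blocks.)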
579 * 580 * Change to algorithm: only push previous cluster if 581 * it was sequential from the point of view of the 582 * seqcount heuristic, otherwise leave the buffer 583 * intact so we can potentially optimize the I/O 584 * later on in the buf_daemon or update daemon 585 * flush. 586 */ 587 cursize = vp->v_lastw - vp->v_cstart + 1; 588 if (((u_quad_t) bp->b_offset + lblocksize) != filesize || 589 lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) { 590 if (!async && seqcount > 0) { 591 cluster_wbuild_wb(vp, lblocksize, 592 vp->v_cstart, cursize); 593 } 594 } else { 595 struct buf **bpp, **endbp; 596 struct cluster_save *buflist; 597 598 buflist = cluster_collectbufs(vp, bp); 599 endbp = &buflist->bs_children 600 [buflist->bs_nchildren - 1]; 601 if (VOP_REALLOCBLKS(vp, buflist)) { 602 /* 603 * Failed, push the previous cluster 604 * if *really* writing sequentially 605 * in the logical file (seqcount > 1), 606 * otherwise delay it in the hopes that 607 * the low level disk driver can 608 * optimize the write ordering. 609 */ 610 for (bpp = buflist->bs_children; 611 bpp < endbp; bpp++) 612 brelse(*bpp); 613 free(buflist, M_SEGMENT); 614 if (seqcount > 1) { 615 cluster_wbuild_wb(vp, 616 lblocksize, vp->v_cstart, 617 cursize); 618 } 619 } else { 620 /* 621 * Succeeded, keep building cluster. 622 */ 623 for (bpp = buflist->bs_children; 624 bpp <= endbp; bpp++) 625 bdwrite(*bpp); 626 free(buflist, M_SEGMENT); 627 vp->v_lastw = lbn; 628 vp->v_lasta = bp->b_blkno; 629 return; 630 } 631 } 632 } 633 /* 634 * Consider beginning a cluster. If at end of file, make 635 * cluster as large as possible, otherwise find size of 636 * existing cluster. 637 */ 638 if ((vp->v_type == VREG) && 639 ((u_quad_t) bp->b_offset + lblocksize) != filesize && 640 (bp->b_blkno == bp->b_lblkno) && 641 (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) || 642 bp->b_blkno == -1)) { 643 bawrite(bp); 644 vp->v_clen = 0; 645 vp->v_lasta = bp->b_blkno; 646 vp->v_cstart = lbn + 1; 647 vp->v_lastw = lbn; 648 return; 649 } 650 vp->v_clen = maxclen; 651 if (!async && maxclen == 0) { /* I/O not contiguous */ 652 vp->v_cstart = lbn + 1; 653 bawrite(bp); 654 } else { /* Wait for rest of cluster */ 655 vp->v_cstart = lbn; 656 bdwrite(bp); 657 } 658 } else if (lbn == vp->v_cstart + vp->v_clen) { 659 /* 660 * At end of cluster, write it out if seqcount tells us we 661 * are operating sequentially, otherwise let the buf or 662 * update daemon handle it. 663 */ 664 bdwrite(bp); 665 if (seqcount > 1) 666 cluster_wbuild_wb(vp, lblocksize, vp->v_cstart, vp->v_clen + 1); 667 vp->v_clen = 0; 668 vp->v_cstart = lbn + 1; 669 } else if (vm_page_count_severe()) { 670 /* 671 * We are low on memory, get it going NOW 672 */ 673 bawrite(bp); 674 } else { 675 /* 676 * In the middle of a cluster, so just delay the I/O for now. 677 */ 678 bdwrite(bp); 679 } 680 vp->v_lastw = lbn; 681 vp->v_lasta = bp->b_blkno; 682 } 683 684 685 /* 686 * This is an awful lot like cluster_rbuild...wish they could be combined. 687 * The last lbn argument is the current block on which I/O is being 688 * performed. Check to see that it doesn't fall in the middle of 689 * the current block (if last_bp == NULL). 
 */
int
cluster_wbuild(vp, size, start_lbn, len)
	struct vnode *vp;
	long size;
	daddr_t start_lbn;
	int len;
{
	struct buf *bp, *tbp;
	int i, j, s;
	int totalwritten = 0;
	int dbsize = btodb(size);

	while (len > 0) {
		s = splbio();
		if (((tbp = gbincore(vp, start_lbn)) == NULL) ||
		    ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) ||
		    BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;
		splx(s);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		    (B_CLUSTEROK | B_VMIO)) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (len == 1) ||
		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}

		/*
		 * We got a pbuf to make the cluster in,
		 * so initialize it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED) {
			bp->b_wcred = tbp->b_wcred;
			crhold(bp->b_wcred);
		}

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags |= B_CLUSTER |
		    (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				s = splbio();
				/*
				 * If the adjacent data is not even in core
				 * it can't need to be written.
				 */
				if ((tbp = gbincore(vp, start_lbn)) == NULL) {
					splx(s);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, don't cluster with it.
				 */
				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    tbp->b_wcred != bp->b_wcred ||
				    BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
					splx(s);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + (dbsize * i)) !=
				    tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy.  We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
				splx(s);
			} /* end of code for non-first buffers only */
			/* check for latent dependencies to be handled */
			if ((LIST_FIRST(&tbp->b_dep)) != NULL)
				buf_start(tbp);
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery.  (yuck)
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}

				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;

			s = splbio();
			bundirty(tbp);
			tbp->b_flags &= ~B_DONE;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_WRITE;
			reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
			++tbp->b_vp->v_numoutput;
			splx(s);
			BUF_KERNPROC(tbp);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			    tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *) bp->b_pages, bp->b_npages);
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		bp->b_kvasize = bp->b_bufsize;
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return totalwritten;
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
			    NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
		    NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}